//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel:
    return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}
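
// For example, a C declaration such as
//
//   __attribute__((stdcall)) void f(void);
//
// maps through the table above (on x86) to an LLVM function using the
// x86_stdcallcc calling convention:
//
//   define x86_stdcallcc void @f()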

/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}
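
// As an illustration of the pass_object_size handling above: a prototype
//
//   void f(void *p __attribute__((pass_object_size(0))));
//
// arranges *two* arguments for 'p', the pointer itself followed by an
// implicit size_t, so a call f(q) is lowered roughly as
// f(q, __builtin_object_size(q, 0)).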

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(
    const CodeGenTypes &CGT, SmallVectorImpl<CanQualType> &prefix,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
                                               bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(DeriveThisType(MD->getParent(), MD));

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgCounts AddedArgs =
      TheCXXABI.buildStructorSignature(GD, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
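  // Some C++ ABIs (the ARM C++ ABI, for example) have constructors and
  // destructors return 'this'; in that case the result type computed below
  // is the 'this' pointer type rather than void.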
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = PassProtoArgs
                              ? RequiredArgs::forPrototypePlus(
                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
                              : RequiredArgs::All;

  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}
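
// (For reference: the "ABI-specific" extra arguments handled above are
// things like the VTT parameter that the Itanium C++ ABI passes to
// constructors of classes with virtual bases, or the 'most derived' flag
// used by the Microsoft ABI.)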

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}
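
// For example, a message send such as [receiver doSomethingWithInt:x] is
// arranged as if calling a free function whose first two parameters are
// the receiver and the selector:
//
//   ret_t f(id self, SEL _cmd, int x);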

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
    return arrangeCXXStructorDeclaration(GD);

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(DeriveThisType(RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}
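
// A note on RequiredArgs: given a variadic prototype such as
//
//   int printf(const char *fmt, ...);
//
// a call printf("%d %d", 1, 2) is arranged with three argument types but
// only one *required* argument, so the ABI can treat the trailing variadic
// arguments differently from the fixed ones if it needs to.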

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

    // If we don't have a prototype at all, but we're supposed to
    // explicitly use the variadic convention for unprototyped calls,
    // treat all of the arguments as required but preserve the nominal
    // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 /*instanceMethod*/ false, /*chainCall*/ false,
                                 argTypes, proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}
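
// For example, invoking a block of type int (^)(int, int) calls a function
// whose IR-level signature is roughly
//
//   i32 @__block_invoke(i8* %block, i32 %x, i32 %y)
//
// where %block is the implicit block-literal argument accounted for by the
// extra prefix argument above.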

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(
    CanQualType resultType, bool instanceMethod, bool chainCall,
    ArrayRef<CanQualType> argTypes, FunctionType::ExtInfo info,
    ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
    RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }
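  // As an example of a coerce type: on x86-64, the SysV ABI classifies
  //
  //   struct S { int a, b; };
  //
  // as Direct with coerce type i64, so an argument of type S is passed as
  // a single i64 value rather than as a first-class struct.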
  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  assert(!required.allowsOptionalArgs() ||
         required.getNumRequiredArgs() <= argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new (buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->CmseNSCall = info.getCmseNSCall();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/
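
// Note the layout create() produces: a single allocation holding the
// CGFunctionInfo itself, then NumArgs + 1 co-allocated ArgInfo slots
// (slot 0 holds the return type and its info), then the optional array of
// ExtParameterInfos.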

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace
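
// For example, under TEK_Record the type
//
//   struct P { int x; int y; };
//
// expands to two i32 arguments, and under TEK_Complex a _Complex double
// expands to two doubles, so getExpansionSize() returns 2 for both.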

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return std::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases: all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return std::make_unique<RecordExpansion>(std::move(Bases),
                                             std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return std::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                         llvm::Function::arg_iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress(*this);
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
    // primitive store.
    assert(isa<NoExpansion>(Exp.get()));
    if (LV.isBitField())
      EmitStoreThroughLValue(RValue::get(&*AI++), LV);
    else
      EmitStoreOfScalar(&*AI++, LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign,
                                           const Twine &Name = "tmp") {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
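
// For example, given the LLVM type { { i32 } } and DstSize == 4, this
// dives ("coerce.dive") down to the address of the innermost i32 instead
// of operating on the outer struct.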

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
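
// An illustrative case: coercing an i64 to an i32 keeps the low 32 bits on
// a little-endian target but the high 32 bits on a big-endian target,
// which is exactly the i32 you would read back after storing the i64 to
// memory and loading its first four bytes.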

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
                                             DstSize.getFixedSize(), CGF);
    SrcTy = Src.getElementType();
  }

  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (!SrcSize.isScalable() && !DstSize.isScalable() &&
      SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src,
                                    Ty->getPointerTo(Src.getAddressSpace()));
    return CGF.Builder.CreateLoad(Src);
  }

  // If coercing a fixed vector to a scalable vector for ABI compatibility, and
  // the types match, use the llvm.experimental.vector.insert intrinsic to
  // perform the conversion.
  if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
    if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
        auto *Load = CGF.Builder.CreateLoad(Src);
        auto *UndefVec = llvm::UndefValue::get(ScalableDst);
        auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
        return CGF.Builder.CreateInsertVector(ScalableDst, UndefVec, Load, Zero,
                                              "castScalableSve");
      }
    }
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp =
      CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
  CGF.Builder.CreateMemCpy(
      Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
      Src.getAlignment().getAsAlign(),
      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
                                         bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Address EltPtr = Builder.CreateStructGEP(Dest, i);
      llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
      Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
                                             SrcSize.getFixedSize(), CGF);
    DstTy = Dst.getElementType();
  }

  llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
  llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
  if (SrcPtrTy && DstPtrTy &&
      SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
    Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (isa<llvm::ScalableVectorType>(SrcTy) ||
      isa<llvm::ScalableVectorType>(DstTy) ||
      SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
    Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
    CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    CGF.Builder.CreateMemCpy(
        Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
        Tmp.getAlignment().getAsAlign(),
        llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(
        addr, CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of the LLVM IR function corresponding to a single Clang
  /// argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};
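
// For example, for a function that returns a large struct indirectly and
// takes (int, struct S) where S is expanded into two fields, the IR
// argument list is laid out as
//
//   0: sret pointer, 1: the int, 2..3: the expanded fields of S
//
// so getIRArgs(1) returns {2, 2}.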
1522 if (IRArgNo == 1 && SwapThisWithSRet) 1523 IRArgNo++; 1524 } 1525 assert(ArgNo == ArgInfo.size()); 1526 1527 if (FI.usesInAlloca()) 1528 InallocaArgNo = IRArgNo++; 1529 1530 TotalIRArgs = IRArgNo; 1531 } 1532 } // namespace 1533 1534 /***/ 1535 1536 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { 1537 const auto &RI = FI.getReturnInfo(); 1538 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet()); 1539 } 1540 1541 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) { 1542 return ReturnTypeUsesSRet(FI) && 1543 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs(); 1544 } 1545 1546 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { 1547 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { 1548 switch (BT->getKind()) { 1549 default: 1550 return false; 1551 case BuiltinType::Float: 1552 return getTarget().useObjCFPRetForRealType(TargetInfo::Float); 1553 case BuiltinType::Double: 1554 return getTarget().useObjCFPRetForRealType(TargetInfo::Double); 1555 case BuiltinType::LongDouble: 1556 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble); 1557 } 1558 } 1559 1560 return false; 1561 } 1562 1563 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { 1564 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { 1565 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { 1566 if (BT->getKind() == BuiltinType::LongDouble) 1567 return getTarget().useObjCFP2RetForComplexLongDouble(); 1568 } 1569 } 1570 1571 return false; 1572 } 1573 1574 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { 1575 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); 1576 return GetFunctionType(FI); 1577 } 1578 1579 llvm::FunctionType * 1580 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { 1581 1582 bool Inserted = FunctionsBeingProcessed.insert(&FI).second; 1583 (void)Inserted; 1584 assert(Inserted && "Recursively being processed?"); 1585 1586 llvm::Type *resultType = nullptr; 1587 const ABIArgInfo &retAI = FI.getReturnInfo(); 1588 switch (retAI.getKind()) { 1589 case ABIArgInfo::Expand: 1590 case ABIArgInfo::IndirectAliased: 1591 llvm_unreachable("Invalid ABI kind for return argument"); 1592 1593 case ABIArgInfo::Extend: 1594 case ABIArgInfo::Direct: 1595 resultType = retAI.getCoerceToType(); 1596 break; 1597 1598 case ABIArgInfo::InAlloca: 1599 if (retAI.getInAllocaSRet()) { 1600 // sret things on win32 aren't void, they return the sret pointer. 1601 QualType ret = FI.getReturnType(); 1602 llvm::Type *ty = ConvertType(ret); 1603 unsigned addressSpace = Context.getTargetAddressSpace(ret); 1604 resultType = llvm::PointerType::get(ty, addressSpace); 1605 } else { 1606 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1607 } 1608 break; 1609 1610 case ABIArgInfo::Indirect: 1611 case ABIArgInfo::Ignore: 1612 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1613 break; 1614 1615 case ABIArgInfo::CoerceAndExpand: 1616 resultType = retAI.getUnpaddedCoerceAndExpandType(); 1617 break; 1618 } 1619 1620 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); 1621 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); 1622 1623 // Add type for sret argument. 
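// For example (sketch; the exact lowering is target-dependent): given
//   struct S { int a[8]; };
//   S f();
// the x86-64 SysV lowering returns void and takes the return slot as a
// hidden first argument:
//   declare void @f(%struct.S* sret(%struct.S) align 4)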
1624 if (IRFunctionArgs.hasSRetArg()) { 1625 QualType Ret = FI.getReturnType(); 1626 llvm::Type *Ty = ConvertType(Ret); 1627 unsigned AddressSpace = Context.getTargetAddressSpace(Ret); 1628 ArgTypes[IRFunctionArgs.getSRetArgNo()] = 1629 llvm::PointerType::get(Ty, AddressSpace); 1630 } 1631 1632 // Add type for inalloca argument. 1633 if (IRFunctionArgs.hasInallocaArg()) { 1634 auto ArgStruct = FI.getArgStruct(); 1635 assert(ArgStruct); 1636 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); 1637 } 1638 1639 // Add in all of the required arguments. 1640 unsigned ArgNo = 0; 1641 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 1642 ie = it + FI.getNumRequiredArgs(); 1643 for (; it != ie; ++it, ++ArgNo) { 1644 const ABIArgInfo &ArgInfo = it->info; 1645 1646 // Insert a padding type to ensure proper alignment. 1647 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 1648 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 1649 ArgInfo.getPaddingType(); 1650 1651 unsigned FirstIRArg, NumIRArgs; 1652 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1653 1654 switch (ArgInfo.getKind()) { 1655 case ABIArgInfo::Ignore: 1656 case ABIArgInfo::InAlloca: 1657 assert(NumIRArgs == 0); 1658 break; 1659 1660 case ABIArgInfo::Indirect: { 1661 assert(NumIRArgs == 1); 1662 // indirect arguments are always on the stack, which is alloca addr space. 1663 llvm::Type *LTy = ConvertTypeForMem(it->type); 1664 ArgTypes[FirstIRArg] = LTy->getPointerTo( 1665 CGM.getDataLayout().getAllocaAddrSpace()); 1666 break; 1667 } 1668 case ABIArgInfo::IndirectAliased: { 1669 assert(NumIRArgs == 1); 1670 llvm::Type *LTy = ConvertTypeForMem(it->type); 1671 ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace()); 1672 break; 1673 } 1674 case ABIArgInfo::Extend: 1675 case ABIArgInfo::Direct: { 1676 // Fast-isel and the optimizer generally like scalar values better than 1677 // FCAs, so we flatten them if this is safe to do for this argument. 
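// For example, an argument coerced to the pair { i64, i64 } becomes two
// scalar IR parameters:
//   declare void @f(i64 %s.coerce0, i64 %s.coerce1)
// rather than a single first-class aggregate parameter.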
1678 llvm::Type *argType = ArgInfo.getCoerceToType(); 1679 llvm::StructType *st = dyn_cast<llvm::StructType>(argType); 1680 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 1681 assert(NumIRArgs == st->getNumElements()); 1682 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) 1683 ArgTypes[FirstIRArg + i] = st->getElementType(i); 1684 } else { 1685 assert(NumIRArgs == 1); 1686 ArgTypes[FirstIRArg] = argType; 1687 } 1688 break; 1689 } 1690 1691 case ABIArgInfo::CoerceAndExpand: { 1692 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1693 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { 1694 *ArgTypesIter++ = EltTy; 1695 } 1696 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1697 break; 1698 } 1699 1700 case ABIArgInfo::Expand: 1701 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1702 getExpandedTypes(it->type, ArgTypesIter); 1703 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1704 break; 1705 } 1706 } 1707 1708 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; 1709 assert(Erased && "Not in set?"); 1710 1711 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic()); 1712 } 1713 1714 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { 1715 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 1716 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 1717 1718 if (!isFuncTypeConvertible(FPT)) 1719 return llvm::StructType::get(getLLVMContext()); 1720 1721 return GetFunctionType(GD); 1722 } 1723 1724 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, 1725 llvm::AttrBuilder &FuncAttrs, 1726 const FunctionProtoType *FPT) { 1727 if (!FPT) 1728 return; 1729 1730 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && 1731 FPT->isNothrow()) 1732 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1733 } 1734 1735 void CodeGenModule::getDefaultFunctionAttributes(StringRef Name, 1736 bool HasOptnone, 1737 bool AttrOnCallSite, 1738 llvm::AttrBuilder &FuncAttrs) { 1739 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. 1740 if (!HasOptnone) { 1741 if (CodeGenOpts.OptimizeSize) 1742 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); 1743 if (CodeGenOpts.OptimizeSize == 2) 1744 FuncAttrs.addAttribute(llvm::Attribute::MinSize); 1745 } 1746 1747 if (CodeGenOpts.DisableRedZone) 1748 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); 1749 if (CodeGenOpts.IndirectTlsSegRefs) 1750 FuncAttrs.addAttribute("indirect-tls-seg-refs"); 1751 if (CodeGenOpts.NoImplicitFloat) 1752 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); 1753 1754 if (AttrOnCallSite) { 1755 // Attributes that should go on the call site only. 
1756 if (!CodeGenOpts.SimplifyLibCalls ||
1757 CodeGenOpts.isNoBuiltinFunc(Name.data()))
1758 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1759 if (!CodeGenOpts.TrapFuncName.empty())
1760 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1761 } else {
1762 StringRef FpKind;
1763 switch (CodeGenOpts.getFramePointer()) {
1764 case CodeGenOptions::FramePointerKind::None:
1765 FpKind = "none";
1766 break;
1767 case CodeGenOptions::FramePointerKind::NonLeaf:
1768 FpKind = "non-leaf";
1769 break;
1770 case CodeGenOptions::FramePointerKind::All:
1771 FpKind = "all";
1772 break;
1773 }
1774 FuncAttrs.addAttribute("frame-pointer", FpKind);
1775
1776 if (CodeGenOpts.LessPreciseFPMAD)
1777 FuncAttrs.addAttribute("less-precise-fpmad", "true");
1778
1779 if (CodeGenOpts.NullPointerIsValid)
1780 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1781
1782 if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
1783 FuncAttrs.addAttribute("denormal-fp-math",
1784 CodeGenOpts.FPDenormalMode.str());
1785 if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
1786 FuncAttrs.addAttribute(
1787 "denormal-fp-math-f32",
1788 CodeGenOpts.FP32DenormalMode.str());
1789 }
1790
1791 if (LangOpts.getFPExceptionMode() == LangOptions::FPE_Ignore)
1792 FuncAttrs.addAttribute("no-trapping-math", "true");
1793
1794 // Strict (compliant) code is the default, so only add this attribute to
1795 // indicate that we are trying to work around a problem case.
1796 if (!CodeGenOpts.StrictFloatCastOverflow)
1797 FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1798
1799 // TODO: Are these all needed?
1800 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1801 if (LangOpts.NoHonorInfs)
1802 FuncAttrs.addAttribute("no-infs-fp-math", "true");
1803 if (LangOpts.NoHonorNaNs)
1804 FuncAttrs.addAttribute("no-nans-fp-math", "true");
1805 if (LangOpts.UnsafeFPMath)
1806 FuncAttrs.addAttribute("unsafe-fp-math", "true");
1807 if (CodeGenOpts.SoftFloat)
1808 FuncAttrs.addAttribute("use-soft-float", "true");
1809 FuncAttrs.addAttribute("stack-protector-buffer-size",
1810 llvm::utostr(CodeGenOpts.SSPBufferSize));
1811 if (LangOpts.NoSignedZero)
1812 FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");
1813
1814 // TODO: Reciprocal estimate codegen options should apply to instructions?
1815 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1816 if (!Recips.empty())
1817 FuncAttrs.addAttribute("reciprocal-estimates",
1818 llvm::join(Recips, ","));
1819
1820 if (!CodeGenOpts.PreferVectorWidth.empty() &&
1821 CodeGenOpts.PreferVectorWidth != "none")
1822 FuncAttrs.addAttribute("prefer-vector-width",
1823 CodeGenOpts.PreferVectorWidth);
1824
1825 if (CodeGenOpts.StackRealignment)
1826 FuncAttrs.addAttribute("stackrealign");
1827 if (CodeGenOpts.Backchain)
1828 FuncAttrs.addAttribute("backchain");
1829 if (CodeGenOpts.EnableSegmentedStacks)
1830 FuncAttrs.addAttribute("split-stack");
1831
1832 if (CodeGenOpts.SpeculativeLoadHardening)
1833 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1834 }
1835
1836 if (getLangOpts().assumeFunctionsAreConvergent()) {
1837 // Conservatively, mark all functions and calls in CUDA and OpenCL as
1838 // convergent (meaning, they may call an intrinsically convergent op, such
1839 // as __syncthreads() / barrier(), and so can't have certain optimizations
1840 // applied around them). LLVM will remove this attribute where it safely
1841 // can.
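// For example, a CUDA device function that (transitively) calls
// __syncthreads() must keep its convergent attribute, while one that
// provably cannot reach any convergent operation will have the attribute
// stripped by LLVM's inference passes.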
1842 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1843 }
1844
1845 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1846 // Exceptions aren't supported in CUDA device code.
1847 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1848 }
1849
1850 for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1851 StringRef Var, Value;
1852 std::tie(Var, Value) = Attr.split('=');
1853 FuncAttrs.addAttribute(Var, Value);
1854 }
1855 }
1856
1857 void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
1858 llvm::AttrBuilder FuncAttrs;
1859 getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
1860 /* AttrOnCallSite = */ false, FuncAttrs);
1861 // TODO: call GetCPUAndFeaturesAttributes?
1862 F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1863 }
1864
1865 void CodeGenModule::addDefaultFunctionDefinitionAttributes(
1866 llvm::AttrBuilder &attrs) {
1867 getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
1868 /*for call*/ false, attrs);
1869 GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
1870 }
1871
1872 static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
1873 const LangOptions &LangOpts,
1874 const NoBuiltinAttr *NBA = nullptr) {
1875 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
1876 SmallString<32> AttributeName;
1877 AttributeName += "no-builtin-";
1878 AttributeName += BuiltinName;
1879 FuncAttrs.addAttribute(AttributeName);
1880 };
1881
1882 // First, handle the language options passed through -fno-builtin.
1883 if (LangOpts.NoBuiltin) {
1884 // -fno-builtin disables them all.
1885 FuncAttrs.addAttribute("no-builtins");
1886 return;
1887 }
1888
1889 // Then, add attributes for builtins specified through -fno-builtin-<name>.
1890 llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
1891
1892 // Now, let's check the __attribute__((no_builtin("..."))) attribute added to
1893 // the source.
1894 if (!NBA)
1895 return;
1896
1897 // If there is a wildcard in the builtin names specified through the
1898 // attribute, disable them all.
1899 if (llvm::is_contained(NBA->builtinNames(), "*")) {
1900 FuncAttrs.addAttribute("no-builtins");
1901 return;
1902 }
1903
1904 // And last, add the rest of the builtin names.
1905 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
1906 }
1907
1908 /// Construct the IR attribute list of a function or call.
1909 ///
1910 /// When adding an attribute, please consider where it should be handled:
1911 ///
1912 /// - getDefaultFunctionAttributes is for attributes that are essentially
1913 /// part of the global target configuration (but perhaps can be
1914 /// overridden on a per-function basis). Adding attributes there
1915 /// will cause them to also be set in frontends that build on Clang's
1916 /// target-configuration logic, as well as for code defined in library
1917 /// modules such as CUDA's libdevice.
1918 ///
1919 /// - ConstructAttributeList builds on top of getDefaultFunctionAttributes
1920 /// and adds declaration-specific, convention-specific, and
1921 /// frontend-specific logic. The last is of particular importance:
1922 /// attributes that restrict how the frontend generates code must be
1923 /// added here rather than getDefaultFunctionAttributes.
1924 ///
1925 void CodeGenModule::ConstructAttributeList(
1926 StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1927 llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1928 llvm::AttrBuilder FuncAttrs;
1929 llvm::AttrBuilder RetAttrs;
1930
1931 // Collect function IR attributes from the CC lowering.
1932 // We'll collect the parameter and result attributes later.
1933 CallingConv = FI.getEffectiveCallingConvention();
1934 if (FI.isNoReturn())
1935 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1936 if (FI.isCmseNSCall())
1937 FuncAttrs.addAttribute("cmse_nonsecure_call");
1938
1939 // Collect function IR attributes from the callee prototype if we have one.
1940 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1941 CalleeInfo.getCalleeFunctionProtoType());
1942
1943 const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1944
1945 bool HasOptnone = false;
1946 // The NoBuiltinAttr attached to the target FunctionDecl.
1947 const NoBuiltinAttr *NBA = nullptr;
1948
1949 // Collect function IR attributes based on declaration-specific
1950 // information.
1951 // FIXME: handle sseregparm someday...
1952 if (TargetDecl) {
1953 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1954 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1955 if (TargetDecl->hasAttr<NoThrowAttr>())
1956 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1957 if (TargetDecl->hasAttr<NoReturnAttr>())
1958 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1959 if (TargetDecl->hasAttr<ColdAttr>())
1960 FuncAttrs.addAttribute(llvm::Attribute::Cold);
1961 if (TargetDecl->hasAttr<HotAttr>())
1962 FuncAttrs.addAttribute(llvm::Attribute::Hot);
1963 if (TargetDecl->hasAttr<NoDuplicateAttr>())
1964 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1965 if (TargetDecl->hasAttr<ConvergentAttr>())
1966 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1967
1968 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1969 AddAttributesFromFunctionProtoType(
1970 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1971 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
1972 // A sane operator new returns a non-aliasing pointer.
1973 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
1974 if (getCodeGenOpts().AssumeSaneOperatorNew &&
1975 (Kind == OO_New || Kind == OO_Array_New))
1976 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1977 }
1978 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1979 const bool IsVirtualCall = MD && MD->isVirtual();
1980 // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
1981 // virtual function. These attributes are not inherited by overrides.
1982 if (!(AttrOnCallSite && IsVirtualCall)) {
1983 if (Fn->isNoReturn())
1984 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1985 NBA = Fn->getAttr<NoBuiltinAttr>();
1986 }
1987 // Only place nomerge attribute on call sites, never functions. This
1988 // allows it to work on indirect virtual function calls.
1989 if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
1990 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
1991 }
1992
1993 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1994 if (TargetDecl->hasAttr<ConstAttr>()) {
1995 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1996 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1997 // gcc specifies that 'const' functions have greater restrictions than
1998 // 'pure' functions, so they also cannot have infinite loops.
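// Illustrative result: `__attribute__((const)) int f(int);` yields a
// declaration marked readnone nounwind willreturn, while
// `__attribute__((pure))` yields readonly instead of readnone.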
1999 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2000 } else if (TargetDecl->hasAttr<PureAttr>()) {
2001 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
2002 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2003 // gcc specifies that 'pure' functions cannot have infinite loops.
2004 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2005 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
2006 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
2007 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2008 }
2009 if (TargetDecl->hasAttr<RestrictAttr>())
2010 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2011 if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
2012 !CodeGenOpts.NullPointerIsValid)
2013 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2014 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2015 FuncAttrs.addAttribute("no_caller_saved_registers");
2016 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
2017 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2018 if (TargetDecl->hasAttr<LeafAttr>())
2019 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2020
2021 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
2022 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
2023 Optional<unsigned> NumElemsParam;
2024 if (AllocSize->getNumElemsParam().isValid())
2025 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2026 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2027 NumElemsParam);
2028 }
2029
2030 if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2031 if (getLangOpts().OpenCLVersion <= 120) {
2032 // OpenCL v1.2: work groups are always uniform.
2033 FuncAttrs.addAttribute("uniform-work-group-size", "true");
2034 } else {
2035 // OpenCL v2.0: work groups may or may not be uniform. The
2036 // '-cl-uniform-work-group-size' compile option gives the compiler
2037 // a hint that the global work-size is a multiple of the
2038 // work-group size specified to clEnqueueNDRangeKernel
2039 // (i.e. work groups are uniform).
2040 FuncAttrs.addAttribute("uniform-work-group-size",
2041 llvm::toStringRef(CodeGenOpts.UniformWGSize));
2042 }
2043 }
2044
2045 std::string AssumptionValueStr;
2046 for (AssumptionAttr *AssumptionA :
2047 TargetDecl->specific_attrs<AssumptionAttr>()) {
2048 std::string AS = AssumptionA->getAssumption().str();
2049 if (!AS.empty() && !AssumptionValueStr.empty())
2050 AssumptionValueStr += ",";
2051 AssumptionValueStr += AS;
2052 }
2053
2054 if (!AssumptionValueStr.empty())
2055 FuncAttrs.addAttribute(llvm::AssumptionAttrKey, AssumptionValueStr);
2056 }
2057
2058 // Attach "no-builtins" attributes to:
2059 // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
2060 // * definitions: "no-builtins" or "no-builtin-<name>" only.
2061 // The attributes can come from:
2062 // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
2063 // * FunctionDecl attributes: __attribute__((no_builtin(...)))
2064 addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
2065
2066 // Collect function IR attributes based on global settings.
2067 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2068
2069 // Override some default IR attributes based on declaration-specific
2070 // information.
2071 if (TargetDecl) {
2072 if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
2073 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2074 if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
2075 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2076 if (TargetDecl->hasAttr<NoSplitStackAttr>())
2077 FuncAttrs.removeAttribute("split-stack");
2078
2079 // Add NonLazyBind attribute to function declarations when -fno-plt
2080 // is used.
2081 // FIXME: what if we just haven't processed the function definition
2082 // yet, or if it's an external definition like C99 inline?
2083 if (CodeGenOpts.NoPLT) {
2084 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2085 if (!Fn->isDefined() && !AttrOnCallSite) {
2086 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2087 }
2088 }
2089 }
2090 }
2091
2092 // Collect non-call-site function IR attributes from declaration-specific
2093 // information.
2094 if (!AttrOnCallSite) {
2095 if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
2096 FuncAttrs.addAttribute("cmse_nonsecure_entry");
2097
2098 // Whether tail calls should be disabled.
2099 auto shouldDisableTailCalls = [&] {
2100 // Should this be honored in getDefaultFunctionAttributes?
2101 if (CodeGenOpts.DisableTailCalls)
2102 return true;
2103
2104 if (!TargetDecl)
2105 return false;
2106
2107 if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
2108 TargetDecl->hasAttr<AnyX86InterruptAttr>())
2109 return true;
2110
2111 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2112 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2113 if (!BD->doesNotEscape())
2114 return true;
2115 }
2116
2117 return false;
2118 };
2119 if (shouldDisableTailCalls())
2120 FuncAttrs.addAttribute("disable-tail-calls", "true");
2121
2122 // CPU/feature overrides. addDefaultFunctionDefinitionAttributes
2123 // handles these separately to set them based on the global defaults.
2124 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
2125 }
2126
2127 // Collect attributes from arguments and return values.
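// For example (sketch; assumes default options): a function returning
// `int &` gets return attributes roughly like
//   declare nonnull align 4 dereferenceable(4) i32* @f()
// since a C++ reference cannot be null and must refer to at least
// sizeof(int) valid bytes.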
2128 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
2129
2130 QualType RetTy = FI.getReturnType();
2131 const ABIArgInfo &RetAI = FI.getReturnInfo();
2132 switch (RetAI.getKind()) {
2133 case ABIArgInfo::Extend:
2134 if (RetAI.isSignExt())
2135 RetAttrs.addAttribute(llvm::Attribute::SExt);
2136 else
2137 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2138 LLVM_FALLTHROUGH;
2139 case ABIArgInfo::Direct:
2140 if (RetAI.getInReg())
2141 RetAttrs.addAttribute(llvm::Attribute::InReg);
2142 break;
2143 case ABIArgInfo::Ignore:
2144 break;
2145
2146 case ABIArgInfo::InAlloca:
2147 case ABIArgInfo::Indirect: {
2148 // inalloca and sret disable readnone and readonly
2149 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2150 .removeAttribute(llvm::Attribute::ReadNone);
2151 break;
2152 }
2153
2154 case ABIArgInfo::CoerceAndExpand:
2155 break;
2156
2157 case ABIArgInfo::Expand:
2158 case ABIArgInfo::IndirectAliased:
2159 llvm_unreachable("Invalid ABI kind for return argument");
2160 }
2161
2162 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2163 QualType PTy = RefTy->getPointeeType();
2164 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2165 RetAttrs.addDereferenceableAttr(
2166 getMinimumObjectSize(PTy).getQuantity());
2167 if (getContext().getTargetAddressSpace(PTy) == 0 &&
2168 !CodeGenOpts.NullPointerIsValid)
2169 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2170 if (PTy->isObjectType()) {
2171 llvm::Align Alignment =
2172 getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
2173 RetAttrs.addAlignmentAttr(Alignment);
2174 }
2175 }
2176
2177 bool hasUsedSRet = false;
2178 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2179
2180 // Attach attributes to sret.
2181 if (IRFunctionArgs.hasSRetArg()) {
2182 llvm::AttrBuilder SRETAttrs;
2183 SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
2184 hasUsedSRet = true;
2185 if (RetAI.getInReg())
2186 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2187 SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
2188 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2189 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2190 }
2191
2192 // Attach attributes to inalloca argument.
2193 if (IRFunctionArgs.hasInallocaArg()) {
2194 llvm::AttrBuilder Attrs;
2195 Attrs.addAttribute(llvm::Attribute::InAlloca);
2196 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2197 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2198 }
2199
2200 // Apply `nonnull` and `dereferenceable(N)` to the `this` argument.
2201 if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() &&
2202 !FI.arg_begin()->type->isVoidPointerType()) {
2203 auto IRArgs = IRFunctionArgs.getIRArgs(0);
2204
2205 assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
2206
2207 llvm::AttrBuilder Attrs;
2208
2209 if (!CodeGenOpts.NullPointerIsValid &&
2210 getContext().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
2211 Attrs.addAttribute(llvm::Attribute::NonNull);
2212 Attrs.addDereferenceableAttr(
2213 getMinimumObjectSize(
2214 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
2215 .getQuantity());
2216 } else {
2217 // FIXME: dereferenceable should be correct here, regardless of
2218 // NullPointerIsValid. However, dereferenceable currently does not always
2219 // respect NullPointerIsValid and may imply nonnull and break the program.
2220 // See https://reviews.llvm.org/D66618 for discussions.
2221 Attrs.addDereferenceableOrNullAttr( 2222 getMinimumObjectSize( 2223 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType()) 2224 .getQuantity()); 2225 } 2226 2227 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs); 2228 } 2229 2230 unsigned ArgNo = 0; 2231 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 2232 E = FI.arg_end(); 2233 I != E; ++I, ++ArgNo) { 2234 QualType ParamType = I->type; 2235 const ABIArgInfo &AI = I->info; 2236 llvm::AttrBuilder Attrs; 2237 2238 // Add attribute for padding argument, if necessary. 2239 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 2240 if (AI.getPaddingInReg()) { 2241 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 2242 llvm::AttributeSet::get( 2243 getLLVMContext(), 2244 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg)); 2245 } 2246 } 2247 2248 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 2249 // have the corresponding parameter variable. It doesn't make 2250 // sense to do it here because parameters are so messed up. 2251 switch (AI.getKind()) { 2252 case ABIArgInfo::Extend: 2253 if (AI.isSignExt()) 2254 Attrs.addAttribute(llvm::Attribute::SExt); 2255 else 2256 Attrs.addAttribute(llvm::Attribute::ZExt); 2257 LLVM_FALLTHROUGH; 2258 case ABIArgInfo::Direct: 2259 if (ArgNo == 0 && FI.isChainCall()) 2260 Attrs.addAttribute(llvm::Attribute::Nest); 2261 else if (AI.getInReg()) 2262 Attrs.addAttribute(llvm::Attribute::InReg); 2263 break; 2264 2265 case ABIArgInfo::Indirect: { 2266 if (AI.getInReg()) 2267 Attrs.addAttribute(llvm::Attribute::InReg); 2268 2269 if (AI.getIndirectByVal()) 2270 Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType)); 2271 2272 auto *Decl = ParamType->getAsRecordDecl(); 2273 if (CodeGenOpts.PassByValueIsNoAlias && Decl && 2274 Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs) 2275 // When calling the function, the pointer passed in will be the only 2276 // reference to the underlying object. Mark it accordingly. 2277 Attrs.addAttribute(llvm::Attribute::NoAlias); 2278 2279 // TODO: We could add the byref attribute if not byval, but it would 2280 // require updating many testcases. 2281 2282 CharUnits Align = AI.getIndirectAlign(); 2283 2284 // In a byval argument, it is important that the required 2285 // alignment of the type is honored, as LLVM might be creating a 2286 // *new* stack object, and needs to know what alignment to give 2287 // it. (Sometimes it can deduce a sensible alignment on its own, 2288 // but not if clang decides it must emit a packed struct, or the 2289 // user specifies increased alignment requirements.) 2290 // 2291 // This is different from indirect *not* byval, where the object 2292 // exists already, and the align attribute is purely 2293 // informative. 2294 assert(!Align.isZero()); 2295 2296 // For now, only add this when we have a byval argument. 2297 // TODO: be less lazy about updating test cases. 2298 if (AI.getIndirectByVal()) 2299 Attrs.addAlignmentAttr(Align.getQuantity()); 2300 2301 // byval disables readnone and readonly. 
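// (A byval parameter is a caller-allocated copy that the callee reads
// through memory, so the function can no longer claim to access no
// memory at all.)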
2302 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2303 .removeAttribute(llvm::Attribute::ReadNone); 2304 2305 break; 2306 } 2307 case ABIArgInfo::IndirectAliased: { 2308 CharUnits Align = AI.getIndirectAlign(); 2309 Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType)); 2310 Attrs.addAlignmentAttr(Align.getQuantity()); 2311 break; 2312 } 2313 case ABIArgInfo::Ignore: 2314 case ABIArgInfo::Expand: 2315 case ABIArgInfo::CoerceAndExpand: 2316 break; 2317 2318 case ABIArgInfo::InAlloca: 2319 // inalloca disables readnone and readonly. 2320 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2321 .removeAttribute(llvm::Attribute::ReadNone); 2322 continue; 2323 } 2324 2325 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { 2326 QualType PTy = RefTy->getPointeeType(); 2327 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 2328 Attrs.addDereferenceableAttr( 2329 getMinimumObjectSize(PTy).getQuantity()); 2330 if (getContext().getTargetAddressSpace(PTy) == 0 && 2331 !CodeGenOpts.NullPointerIsValid) 2332 Attrs.addAttribute(llvm::Attribute::NonNull); 2333 if (PTy->isObjectType()) { 2334 llvm::Align Alignment = 2335 getNaturalPointeeTypeAlignment(ParamType).getAsAlign(); 2336 Attrs.addAlignmentAttr(Alignment); 2337 } 2338 } 2339 2340 switch (FI.getExtParameterInfo(ArgNo).getABI()) { 2341 case ParameterABI::Ordinary: 2342 break; 2343 2344 case ParameterABI::SwiftIndirectResult: { 2345 // Add 'sret' if we haven't already used it for something, but 2346 // only if the result is void. 2347 if (!hasUsedSRet && RetTy->isVoidType()) { 2348 Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType)); 2349 hasUsedSRet = true; 2350 } 2351 2352 // Add 'noalias' in either case. 2353 Attrs.addAttribute(llvm::Attribute::NoAlias); 2354 2355 // Add 'dereferenceable' and 'alignment'. 2356 auto PTy = ParamType->getPointeeType(); 2357 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { 2358 auto info = getContext().getTypeInfoInChars(PTy); 2359 Attrs.addDereferenceableAttr(info.Width.getQuantity()); 2360 Attrs.addAlignmentAttr(info.Align.getAsAlign()); 2361 } 2362 break; 2363 } 2364 2365 case ParameterABI::SwiftErrorResult: 2366 Attrs.addAttribute(llvm::Attribute::SwiftError); 2367 break; 2368 2369 case ParameterABI::SwiftContext: 2370 Attrs.addAttribute(llvm::Attribute::SwiftSelf); 2371 break; 2372 } 2373 2374 if (FI.getExtParameterInfo(ArgNo).isNoEscape()) 2375 Attrs.addAttribute(llvm::Attribute::NoCapture); 2376 2377 if (Attrs.hasAttributes()) { 2378 unsigned FirstIRArg, NumIRArgs; 2379 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2380 for (unsigned i = 0; i < NumIRArgs; i++) 2381 ArgAttrs[FirstIRArg + i] = 2382 llvm::AttributeSet::get(getLLVMContext(), Attrs); 2383 } 2384 } 2385 assert(ArgNo == FI.arg_size()); 2386 2387 AttrList = llvm::AttributeList::get( 2388 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs), 2389 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs); 2390 } 2391 2392 /// An argument came in as a promoted argument; demote it back to its 2393 /// declared type. 2394 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 2395 const VarDecl *var, 2396 llvm::Value *value) { 2397 llvm::Type *varType = CGF.ConvertType(var->getType()); 2398 2399 // This can happen with promotions that actually don't change the 2400 // underlying type, like the enum promotions. 
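// For example, with a K&R-style definition
//   void f(c) char c; { ... }
// the caller passes 'c' promoted to int, and the demotion below truncates
// the incoming i32 back to i8 to match the declared parameter type.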
2401 if (value->getType() == varType) return value; 2402 2403 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 2404 && "unexpected promotion type"); 2405 2406 if (isa<llvm::IntegerType>(varType)) 2407 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 2408 2409 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 2410 } 2411 2412 /// Returns the attribute (either parameter attribute, or function 2413 /// attribute), which declares argument ArgNo to be non-null. 2414 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, 2415 QualType ArgType, unsigned ArgNo) { 2416 // FIXME: __attribute__((nonnull)) can also be applied to: 2417 // - references to pointers, where the pointee is known to be 2418 // nonnull (apparently a Clang extension) 2419 // - transparent unions containing pointers 2420 // In the former case, LLVM IR cannot represent the constraint. In 2421 // the latter case, we have no guarantee that the transparent union 2422 // is in fact passed as a pointer. 2423 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) 2424 return nullptr; 2425 // First, check attribute on parameter itself. 2426 if (PVD) { 2427 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) 2428 return ParmNNAttr; 2429 } 2430 // Check function attributes. 2431 if (!FD) 2432 return nullptr; 2433 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) { 2434 if (NNAttr->isNonNull(ArgNo)) 2435 return NNAttr; 2436 } 2437 return nullptr; 2438 } 2439 2440 namespace { 2441 struct CopyBackSwiftError final : EHScopeStack::Cleanup { 2442 Address Temp; 2443 Address Arg; 2444 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {} 2445 void Emit(CodeGenFunction &CGF, Flags flags) override { 2446 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp); 2447 CGF.Builder.CreateStore(errorValue, Arg); 2448 } 2449 }; 2450 } 2451 2452 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, 2453 llvm::Function *Fn, 2454 const FunctionArgList &Args) { 2455 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) 2456 // Naked functions don't have prologues. 2457 return; 2458 2459 // If this is an implicit-return-zero function, go ahead and 2460 // initialize the return value. TODO: it might be nice to have 2461 // a more general mechanism for this that didn't require synthesized 2462 // return statements. 2463 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) { 2464 if (FD->hasImplicitReturnZero()) { 2465 QualType RetTy = FD->getReturnType().getUnqualifiedType(); 2466 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy); 2467 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy); 2468 Builder.CreateStore(Zero, ReturnValue); 2469 } 2470 } 2471 2472 // FIXME: We no longer need the types from FunctionArgList; lift up and 2473 // simplify. 2474 2475 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI); 2476 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs()); 2477 2478 // If we're using inalloca, all the memory arguments are GEPs off of the last 2479 // parameter, which is a pointer to the complete memory area. 2480 Address ArgStruct = Address::invalid(); 2481 if (IRFunctionArgs.hasInallocaArg()) { 2482 ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()), 2483 FI.getArgStructAlignment()); 2484 2485 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo()); 2486 } 2487 2488 // Name the struct return parameter. 
2489 if (IRFunctionArgs.hasSRetArg()) {
2490 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
2491 AI->setName("agg.result");
2492 AI->addAttr(llvm::Attribute::NoAlias);
2493 }
2494
2495 // Track if we received the parameter as a pointer (indirect, byval, or
2496 // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy
2497 // it into a local alloca for us.
2498 SmallVector<ParamValue, 16> ArgVals;
2499 ArgVals.reserve(Args.size());
2500
2501 // Create a pointer value for every parameter declaration. This usually
2502 // entails copying one or more LLVM IR arguments into an alloca. Don't push
2503 // any cleanups or do anything that might unwind. We do that separately, so
2504 // we can push the cleanups in the correct order for the ABI.
2505 assert(FI.arg_size() == Args.size() &&
2506 "Mismatch between function signature & arguments.");
2507 unsigned ArgNo = 0;
2508 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2509 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2510 i != e; ++i, ++info_it, ++ArgNo) {
2511 const VarDecl *Arg = *i;
2512 const ABIArgInfo &ArgI = info_it->info;
2513
2514 bool isPromoted =
2515 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2516 // We are converting from ABIArgInfo type to VarDecl type directly, unless
2517 // the parameter is promoted. In this case we convert to
2518 // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2519 QualType Ty = isPromoted ? info_it->type : Arg->getType();
2520 assert(hasScalarEvaluationKind(Ty) ==
2521 hasScalarEvaluationKind(Arg->getType()));
2522
2523 unsigned FirstIRArg, NumIRArgs;
2524 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2525
2526 switch (ArgI.getKind()) {
2527 case ABIArgInfo::InAlloca: {
2528 assert(NumIRArgs == 0);
2529 auto FieldIndex = ArgI.getInAllocaFieldIndex();
2530 Address V =
2531 Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2532 if (ArgI.getInAllocaIndirect())
2533 V = Address(Builder.CreateLoad(V),
2534 getContext().getTypeAlignInChars(Ty));
2535 ArgVals.push_back(ParamValue::forIndirect(V));
2536 break;
2537 }
2538
2539 case ABIArgInfo::Indirect:
2540 case ABIArgInfo::IndirectAliased: {
2541 assert(NumIRArgs == 1);
2542 Address ParamAddr =
2543 Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());
2544
2545 if (!hasScalarEvaluationKind(Ty)) {
2546 // Aggregates and complex variables are accessed by reference. All we
2547 // need to do is realign the value, if requested. Also, if the address
2548 // may be aliased, copy it to ensure that the parameter variable is
2549 // mutable and has a unique address, as C requires.
2550 Address V = ParamAddr;
2551 if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
2552 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2553
2554 // Copy from the incoming argument pointer to the temporary with the
2555 // appropriate alignment.
2556 //
2557 // FIXME: We should have a common utility for generating an aggregate
2558 // copy.
2559 CharUnits Size = getContext().getTypeSizeInChars(Ty);
2560 Builder.CreateMemCpy(
2561 AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
2562 ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
2563 llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
2564 V = AlignedTemp;
2565 }
2566 ArgVals.push_back(ParamValue::forIndirect(V));
2567 } else {
2568 // Load scalar value from indirect argument.
2569 llvm::Value *V = 2570 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc()); 2571 2572 if (isPromoted) 2573 V = emitArgumentDemotion(*this, Arg, V); 2574 ArgVals.push_back(ParamValue::forDirect(V)); 2575 } 2576 break; 2577 } 2578 2579 case ABIArgInfo::Extend: 2580 case ABIArgInfo::Direct: { 2581 auto AI = Fn->getArg(FirstIRArg); 2582 llvm::Type *LTy = ConvertType(Arg->getType()); 2583 2584 // Prepare parameter attributes. So far, only attributes for pointer 2585 // parameters are prepared. See 2586 // http://llvm.org/docs/LangRef.html#paramattrs. 2587 if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() && 2588 ArgI.getCoerceToType()->isPointerTy()) { 2589 assert(NumIRArgs == 1); 2590 2591 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) { 2592 // Set `nonnull` attribute if any. 2593 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), 2594 PVD->getFunctionScopeIndex()) && 2595 !CGM.getCodeGenOpts().NullPointerIsValid) 2596 AI->addAttr(llvm::Attribute::NonNull); 2597 2598 QualType OTy = PVD->getOriginalType(); 2599 if (const auto *ArrTy = 2600 getContext().getAsConstantArrayType(OTy)) { 2601 // A C99 array parameter declaration with the static keyword also 2602 // indicates dereferenceability, and if the size is constant we can 2603 // use the dereferenceable attribute (which requires the size in 2604 // bytes). 2605 if (ArrTy->getSizeModifier() == ArrayType::Static) { 2606 QualType ETy = ArrTy->getElementType(); 2607 llvm::Align Alignment = 2608 CGM.getNaturalTypeAlignment(ETy).getAsAlign(); 2609 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment)); 2610 uint64_t ArrSize = ArrTy->getSize().getZExtValue(); 2611 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && 2612 ArrSize) { 2613 llvm::AttrBuilder Attrs; 2614 Attrs.addDereferenceableAttr( 2615 getContext().getTypeSizeInChars(ETy).getQuantity() * 2616 ArrSize); 2617 AI->addAttrs(Attrs); 2618 } else if (getContext().getTargetInfo().getNullPointerValue( 2619 ETy.getAddressSpace()) == 0 && 2620 !CGM.getCodeGenOpts().NullPointerIsValid) { 2621 AI->addAttr(llvm::Attribute::NonNull); 2622 } 2623 } 2624 } else if (const auto *ArrTy = 2625 getContext().getAsVariableArrayType(OTy)) { 2626 // For C99 VLAs with the static keyword, we don't know the size so 2627 // we can't use the dereferenceable attribute, but in addrspace(0) 2628 // we know that it must be nonnull. 2629 if (ArrTy->getSizeModifier() == VariableArrayType::Static) { 2630 QualType ETy = ArrTy->getElementType(); 2631 llvm::Align Alignment = 2632 CGM.getNaturalTypeAlignment(ETy).getAsAlign(); 2633 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment)); 2634 if (!getContext().getTargetAddressSpace(ETy) && 2635 !CGM.getCodeGenOpts().NullPointerIsValid) 2636 AI->addAttr(llvm::Attribute::NonNull); 2637 } 2638 } 2639 2640 // Set `align` attribute if any. 2641 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); 2642 if (!AVAttr) 2643 if (const auto *TOTy = dyn_cast<TypedefType>(OTy)) 2644 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); 2645 if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) { 2646 // If alignment-assumption sanitizer is enabled, we do *not* add 2647 // alignment attribute here, but emit normal alignment assumption, 2648 // so the UBSAN check could function. 
2649 llvm::ConstantInt *AlignmentCI = 2650 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment())); 2651 unsigned AlignmentInt = 2652 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment); 2653 if (AI->getParamAlign().valueOrOne() < AlignmentInt) { 2654 AI->removeAttr(llvm::Attribute::AttrKind::Alignment); 2655 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr( 2656 llvm::Align(AlignmentInt))); 2657 } 2658 } 2659 } 2660 2661 // Set 'noalias' if an argument type has the `restrict` qualifier. 2662 if (Arg->getType().isRestrictQualified()) 2663 AI->addAttr(llvm::Attribute::NoAlias); 2664 } 2665 2666 // Prepare the argument value. If we have the trivial case, handle it 2667 // with no muss and fuss. 2668 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 2669 ArgI.getCoerceToType() == ConvertType(Ty) && 2670 ArgI.getDirectOffset() == 0) { 2671 assert(NumIRArgs == 1); 2672 2673 // LLVM expects swifterror parameters to be used in very restricted 2674 // ways. Copy the value into a less-restricted temporary. 2675 llvm::Value *V = AI; 2676 if (FI.getExtParameterInfo(ArgNo).getABI() 2677 == ParameterABI::SwiftErrorResult) { 2678 QualType pointeeTy = Ty->getPointeeType(); 2679 assert(pointeeTy->isPointerType()); 2680 Address temp = 2681 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 2682 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy)); 2683 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); 2684 Builder.CreateStore(incomingErrorValue, temp); 2685 V = temp.getPointer(); 2686 2687 // Push a cleanup to copy the value back at the end of the function. 2688 // The convention does not guarantee that the value will be written 2689 // back if the function exits with an unwind exception. 2690 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg); 2691 } 2692 2693 // Ensure the argument is the correct type. 2694 if (V->getType() != ArgI.getCoerceToType()) 2695 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 2696 2697 if (isPromoted) 2698 V = emitArgumentDemotion(*this, Arg, V); 2699 2700 // Because of merging of function types from multiple decls it is 2701 // possible for the type of an argument to not match the corresponding 2702 // type in the function type. Since we are codegening the callee 2703 // in here, add a cast to the argument type. 2704 llvm::Type *LTy = ConvertType(Arg->getType()); 2705 if (V->getType() != LTy) 2706 V = Builder.CreateBitCast(V, LTy); 2707 2708 ArgVals.push_back(ParamValue::forDirect(V)); 2709 break; 2710 } 2711 2712 // VLST arguments are coerced to VLATs at the function boundary for 2713 // ABI consistency. If this is a VLST that was coerced to 2714 // a VLAT at the function boundary and the types match up, use 2715 // llvm.experimental.vector.extract to convert back to the original 2716 // VLST. 
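// Illustrative IR (assumes -msve-vector-bits=256, so the VLST is
// <8 x i32> and the ABI type is <vscale x 4 x i32>):
//   %cast.fixed = call <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv4i32(
//                     <vscale x 4 x i32> %coerce, i64 0)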
2717 if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) { 2718 auto *Coerced = Fn->getArg(FirstIRArg); 2719 if (auto *VecTyFrom = 2720 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) { 2721 if (VecTyFrom->getElementType() == VecTyTo->getElementType()) { 2722 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty); 2723 2724 assert(NumIRArgs == 1); 2725 Coerced->setName(Arg->getName() + ".coerce"); 2726 ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector( 2727 VecTyTo, Coerced, Zero, "castFixedSve"))); 2728 break; 2729 } 2730 } 2731 } 2732 2733 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), 2734 Arg->getName()); 2735 2736 // Pointer to store into. 2737 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI); 2738 2739 // Fast-isel and the optimizer generally like scalar values better than 2740 // FCAs, so we flatten them if this is safe to do for this argument. 2741 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 2742 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && 2743 STy->getNumElements() > 1) { 2744 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 2745 llvm::Type *DstTy = Ptr.getElementType(); 2746 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 2747 2748 Address AddrToStoreInto = Address::invalid(); 2749 if (SrcSize <= DstSize) { 2750 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy); 2751 } else { 2752 AddrToStoreInto = 2753 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce"); 2754 } 2755 2756 assert(STy->getNumElements() == NumIRArgs); 2757 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2758 auto AI = Fn->getArg(FirstIRArg + i); 2759 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 2760 Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i); 2761 Builder.CreateStore(AI, EltPtr); 2762 } 2763 2764 if (SrcSize > DstSize) { 2765 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize); 2766 } 2767 2768 } else { 2769 // Simple case, just do a coerced store of the argument into the alloca. 2770 assert(NumIRArgs == 1); 2771 auto AI = Fn->getArg(FirstIRArg); 2772 AI->setName(Arg->getName() + ".coerce"); 2773 CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); 2774 } 2775 2776 // Match to what EmitParmDecl is expecting for this type. 2777 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 2778 llvm::Value *V = 2779 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc()); 2780 if (isPromoted) 2781 V = emitArgumentDemotion(*this, Arg, V); 2782 ArgVals.push_back(ParamValue::forDirect(V)); 2783 } else { 2784 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2785 } 2786 break; 2787 } 2788 2789 case ABIArgInfo::CoerceAndExpand: { 2790 // Reconstruct into a temporary. 
2791 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2792 ArgVals.push_back(ParamValue::forIndirect(alloca));
2793
2794 auto coercionType = ArgI.getCoerceAndExpandType();
2795 alloca = Builder.CreateElementBitCast(alloca, coercionType);
2796
2797 unsigned argIndex = FirstIRArg;
2798 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2799 llvm::Type *eltType = coercionType->getElementType(i);
2800 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2801 continue;
2802
2803 auto eltAddr = Builder.CreateStructGEP(alloca, i);
2804 auto elt = Fn->getArg(argIndex++);
2805 Builder.CreateStore(elt, eltAddr);
2806 }
2807 assert(argIndex == FirstIRArg + NumIRArgs);
2808 break;
2809 }
2810
2811 case ABIArgInfo::Expand: {
2812 // If this structure was expanded into multiple arguments then
2813 // we need to create a temporary and reconstruct it from the
2814 // arguments.
2815 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2816 LValue LV = MakeAddrLValue(Alloca, Ty);
2817 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2818
2819 auto FnArgIter = Fn->arg_begin() + FirstIRArg;
2820 ExpandTypeFromArgs(Ty, LV, FnArgIter);
2821 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
2822 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2823 auto AI = Fn->getArg(FirstIRArg + i);
2824 AI->setName(Arg->getName() + "." + Twine(i));
2825 }
2826 break;
2827 }
2828
2829 case ABIArgInfo::Ignore:
2830 assert(NumIRArgs == 0);
2831 // Initialize the local variable appropriately.
2832 if (!hasScalarEvaluationKind(Ty)) {
2833 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2834 } else {
2835 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2836 ArgVals.push_back(ParamValue::forDirect(U));
2837 }
2838 break;
2839 }
2840 }
2841
2842 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2843 for (int I = Args.size() - 1; I >= 0; --I)
2844 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2845 } else {
2846 for (unsigned I = 0, E = Args.size(); I != E; ++I)
2847 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2848 }
2849 }
2850
2851 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2852 while (insn->use_empty()) {
2853 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2854 if (!bitcast) return;
2855
2856 // This is "safe" because we would have used a ConstantExpr otherwise.
2857 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2858 bitcast->eraseFromParent();
2859 }
2860 }
2861
2862 /// Try to emit a fused autorelease of a return result.
2863 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2864 llvm::Value *result) {
2865 // The result must be the last instruction in the current block.
2866 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2867 if (BB->empty()) return nullptr;
2868 if (&BB->back() != result) return nullptr;
2869
2870 llvm::Type *resultType = result->getType();
2871
2872 // result is in a BasicBlock and is therefore an Instruction.
2873 llvm::Instruction *generator = cast<llvm::Instruction>(result);
2874
2875 SmallVector<llvm::Instruction *, 4> InstsToKill;
2876
2877 // Look for:
2878 // %generator = bitcast %type1* %generator2 to %type2*
2879 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2880 // We would have emitted this as a constant if the operand weren't
2881 // an Instruction.
2882 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2883
2884 // Require the generator to be immediately followed by the cast.
2885 if (generator->getNextNode() != bitcast) 2886 return nullptr; 2887 2888 InstsToKill.push_back(bitcast); 2889 } 2890 2891 // Look for: 2892 // %generator = call i8* @objc_retain(i8* %originalResult) 2893 // or 2894 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult) 2895 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator); 2896 if (!call) return nullptr; 2897 2898 bool doRetainAutorelease; 2899 2900 if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) { 2901 doRetainAutorelease = true; 2902 } else if (call->getCalledOperand() == 2903 CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) { 2904 doRetainAutorelease = false; 2905 2906 // If we emitted an assembly marker for this call (and the 2907 // ARCEntrypoints field should have been set if so), go looking 2908 // for that call. If we can't find it, we can't do this 2909 // optimization. But it should always be the immediately previous 2910 // instruction, unless we needed bitcasts around the call. 2911 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { 2912 llvm::Instruction *prev = call->getPrevNode(); 2913 assert(prev); 2914 if (isa<llvm::BitCastInst>(prev)) { 2915 prev = prev->getPrevNode(); 2916 assert(prev); 2917 } 2918 assert(isa<llvm::CallInst>(prev)); 2919 assert(cast<llvm::CallInst>(prev)->getCalledOperand() == 2920 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); 2921 InstsToKill.push_back(prev); 2922 } 2923 } else { 2924 return nullptr; 2925 } 2926 2927 result = call->getArgOperand(0); 2928 InstsToKill.push_back(call); 2929 2930 // Keep killing bitcasts, for sanity. Note that we no longer care 2931 // about precise ordering as long as there's exactly one use. 2932 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 2933 if (!bitcast->hasOneUse()) break; 2934 InstsToKill.push_back(bitcast); 2935 result = bitcast->getOperand(0); 2936 } 2937 2938 // Delete all the unnecessary instructions, from latest to earliest. 2939 for (auto *I : InstsToKill) 2940 I->eraseFromParent(); 2941 2942 // Do the fused retain/autorelease if we were asked to. 2943 if (doRetainAutorelease) 2944 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 2945 2946 // Cast back to the result type. 2947 return CGF.Builder.CreateBitCast(result, resultType); 2948 } 2949 2950 /// If this is a +1 of the value of an immutable 'self', remove it. 2951 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 2952 llvm::Value *result) { 2953 // This is only applicable to a method with an immutable 'self'. 2954 const ObjCMethodDecl *method = 2955 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 2956 if (!method) return nullptr; 2957 const VarDecl *self = method->getSelfDecl(); 2958 if (!self->getType().isConstQualified()) return nullptr; 2959 2960 // Look for a retain call. 2961 llvm::CallInst *retainCall = 2962 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 2963 if (!retainCall || retainCall->getCalledOperand() != 2964 CGF.CGM.getObjCEntrypoints().objc_retain) 2965 return nullptr; 2966 2967 // Look for an ordinary load of 'self'. 2968 llvm::Value *retainedValue = retainCall->getArgOperand(0); 2969 llvm::LoadInst *load = 2970 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 2971 if (!load || load->isAtomic() || load->isVolatile() || 2972 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) 2973 return nullptr; 2974 2975 // Okay! Burn it all down. 
This relies for correctness on the
2976 // assumption that the retain is emitted as part of the return and
2977 // that thereafter everything is used "linearly".
2978 llvm::Type *resultType = result->getType();
2979 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2980 assert(retainCall->use_empty());
2981 retainCall->eraseFromParent();
2982 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2983
2984 return CGF.Builder.CreateBitCast(load, resultType);
2985 }
2986
2987 /// Emit an ARC autorelease of the result of a function.
2988 ///
2989 /// \return the value to actually return from the function
2990 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2991 llvm::Value *result) {
2992 // If we're returning 'self', kill the initial retain. This is a
2993 // heuristic attempt to "encourage correctness" in the really unfortunate
2994 // case where we have a return of self during a dealloc and we desperately
2995 // need to avoid the possible autorelease.
2996 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2997 return self;
2998
2999 // At -O0, try to emit a fused retain/autorelease.
3000 if (CGF.shouldUseFusedARCCalls())
3001 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
3002 return fused;
3003
3004 return CGF.EmitARCAutoreleaseReturnValue(result);
3005 }
3006
3007 /// Heuristically search for a dominating store to the return-value slot.
3008 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
3009 // Check if a User is a store whose pointer operand is the ReturnValue.
3010 // We are looking for stores to the ReturnValue, not for stores of the
3011 // ReturnValue to some other location.
3012 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
3013 auto *SI = dyn_cast<llvm::StoreInst>(U);
3014 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
3015 return nullptr;
3016 // These aren't actually possible for non-coerced returns, and we
3017 // only care about non-coerced returns on this code path.
3018 assert(!SI->isAtomic() && !SI->isVolatile());
3019 return SI;
3020 };
3021 // If there are multiple uses of the return-value slot, just check
3022 // for something immediately preceding the IP. Sometimes this can
3023 // happen with how we generate implicit-returns; it can also happen
3024 // with noreturn cleanups.
3025 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
3026 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3027 if (IP->empty()) return nullptr;
3028 llvm::Instruction *I = &IP->back();
3029
3030 // Skip lifetime markers
3031 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
3032 IE = IP->rend();
3033 II != IE; ++II) {
3034 if (llvm::IntrinsicInst *Intrinsic =
3035 dyn_cast<llvm::IntrinsicInst>(&*II)) {
3036 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
3037 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
3038 ++II;
3039 if (II == IE)
3040 break;
3041 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
3042 continue;
3043 }
3044 }
3045 I = &*II;
3046 break;
3047 }
3048
3049 return GetStoreIfValid(I);
3050 }
3051
3052 llvm::StoreInst *store =
3053 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
3054 if (!store) return nullptr;
3055
3056 // Now do a quick-and-dirty dominance check: just walk up the
3057 // single-predecessors chain from the current insertion point.
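// (If every block from the insertion point back to the store has a single
// predecessor, execution must have passed through the store, so the store
// dominates the return; any merge point along the way means some path
// could bypass the store, and we conservatively give up.)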
3058 llvm::BasicBlock *StoreBB = store->getParent(); 3059 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); 3060 while (IP != StoreBB) { 3061 if (!(IP = IP->getSinglePredecessor())) 3062 return nullptr; 3063 } 3064 3065 // Okay, the store's basic block dominates the insertion point; we 3066 // can do our thing. 3067 return store; 3068 } 3069 3070 // Helper functions for EmitCMSEClearRecord 3071 3072 // Set the bits corresponding to a field having width `BitWidth` and located at 3073 // offset `BitOffset` (from the least significant bit) within a storage unit of 3074 // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte. 3075 // Use little-endian layout, i.e.`Bits[0]` is the LSB. 3076 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset, 3077 int BitWidth, int CharWidth) { 3078 assert(CharWidth <= 64); 3079 assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth); 3080 3081 int Pos = 0; 3082 if (BitOffset >= CharWidth) { 3083 Pos += BitOffset / CharWidth; 3084 BitOffset = BitOffset % CharWidth; 3085 } 3086 3087 const uint64_t Used = (uint64_t(1) << CharWidth) - 1; 3088 if (BitOffset + BitWidth >= CharWidth) { 3089 Bits[Pos++] |= (Used << BitOffset) & Used; 3090 BitWidth -= CharWidth - BitOffset; 3091 BitOffset = 0; 3092 } 3093 3094 while (BitWidth >= CharWidth) { 3095 Bits[Pos++] = Used; 3096 BitWidth -= CharWidth; 3097 } 3098 3099 if (BitWidth > 0) 3100 Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset; 3101 } 3102 3103 // Set the bits corresponding to a field having width `BitWidth` and located at 3104 // offset `BitOffset` (from the least significant bit) within a storage unit of 3105 // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of 3106 // `Bits` corresponds to one target byte. Use target endian layout. 3107 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset, 3108 int StorageSize, int BitOffset, int BitWidth, 3109 int CharWidth, bool BigEndian) { 3110 3111 SmallVector<uint64_t, 8> TmpBits(StorageSize); 3112 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth); 3113 3114 if (BigEndian) 3115 std::reverse(TmpBits.begin(), TmpBits.end()); 3116 3117 for (uint64_t V : TmpBits) 3118 Bits[StorageOffset++] |= V; 3119 } 3120 3121 static void setUsedBits(CodeGenModule &, QualType, int, 3122 SmallVectorImpl<uint64_t> &); 3123 3124 // Set the bits in `Bits`, which correspond to the value representations of 3125 // the actual members of the record type `RTy`. Note that this function does 3126 // not handle base classes, virtual tables, etc, since they cannot happen in 3127 // CMSE function arguments or return. The bit mask corresponds to the target 3128 // memory layout, i.e. it's endian dependent. 
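// For illustration, on an assumed target with 8-bit chars and a 16-bit short
// aligned to 2 bytes,
//   struct S { char c; short s; };
// yields the per-byte mask {0xff, 0x00, 0xff, 0xff}: byte 1 is padding, so
// its bits stay clear and are scrubbed by the EmitCMSEClearRecord callers.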
3129 static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
3130 SmallVectorImpl<uint64_t> &Bits) {
3131 ASTContext &Context = CGM.getContext();
3132 int CharWidth = Context.getCharWidth();
3133 const RecordDecl *RD = RTy->getDecl()->getDefinition();
3134 const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
3135 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
3136
3137 int Idx = 0;
3138 for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
3139 const FieldDecl *F = *I;
3140
3141 if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
3142 F->getType()->isIncompleteArrayType())
3143 continue;
3144
3145 if (F->isBitField()) {
3146 const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
3147 setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
3148 BFI.StorageSize / CharWidth, BFI.Offset,
3149 BFI.Size, CharWidth,
3150 CGM.getDataLayout().isBigEndian());
3151 continue;
3152 }
3153
3154 setUsedBits(CGM, F->getType(),
3155 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
3156 }
3157 }
3158
3159 // Set the bits in `Bits`, which correspond to the value representations of
3160 // the elements of an array type `ATy`.
3161 static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
3162 int Offset, SmallVectorImpl<uint64_t> &Bits) {
3163 const ASTContext &Context = CGM.getContext();
3164
3165 QualType ETy = Context.getBaseElementType(ATy);
3166 int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3167 SmallVector<uint64_t, 4> TmpBits(Size);
3168 setUsedBits(CGM, ETy, 0, TmpBits);
3169
3170 for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3171 auto Src = TmpBits.begin();
3172 auto Dst = Bits.begin() + Offset + I * Size;
3173 for (int J = 0; J < Size; ++J)
3174 *Dst++ |= *Src++;
3175 }
3176 }
3177
3178 // Set the bits in `Bits`, which correspond to the value representations of
3179 // the type `QTy`.
3180 static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
3181 SmallVectorImpl<uint64_t> &Bits) {
3182 if (const auto *RTy = QTy->getAs<RecordType>())
3183 return setUsedBits(CGM, RTy, Offset, Bits);
3184
3185 ASTContext &Context = CGM.getContext();
3186 if (const auto *ATy = Context.getAsConstantArrayType(QTy))
3187 return setUsedBits(CGM, ATy, Offset, Bits);
3188
3189 int Size = Context.getTypeSizeInChars(QTy).getQuantity();
3190 if (Size <= 0)
3191 return;
3192
3193 std::fill_n(Bits.begin() + Offset, Size,
3194 (uint64_t(1) << Context.getCharWidth()) - 1);
3195 }
3196
3197 static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
3198 int Pos, int Size, int CharWidth,
3199 bool BigEndian) {
3200 assert(Size > 0);
3201 uint64_t Mask = 0;
3202 if (BigEndian) {
3203 for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
3204 ++P)
3205 Mask = (Mask << CharWidth) | *P;
3206 } else {
3207 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3208 do
3209 Mask = (Mask << CharWidth) | *--P;
3210 while (P != End);
3211 }
3212 return Mask;
3213 }
3214
3215 // Emit code to clear the bits in a record that aren't part of any
3216 // user-declared member, when the record is a function return.
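// A sketch for the assumed struct S above, returned as a 32-bit integer on a
// little-endian target (mask 0xFFFF00FF):
//   %cmse.clear = and i32 %ret, -65281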
3217 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3218 llvm::IntegerType *ITy,
3219 QualType QTy) {
3220 assert(Src->getType() == ITy);
3221 assert(ITy->getScalarSizeInBits() <= 64);
3222
3223 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3224 int Size = DataLayout.getTypeStoreSize(ITy);
3225 SmallVector<uint64_t, 4> Bits(Size);
3226 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3227
3228 int CharWidth = CGM.getContext().getCharWidth();
3229 uint64_t Mask =
3230 buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
3231
3232 return Builder.CreateAnd(Src, Mask, "cmse.clear");
3233 }
3234
3235 // Emit code to clear the bits in a record that aren't part of any
3236 // user-declared member, when the record is a function argument.
3237 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3238 llvm::ArrayType *ATy,
3239 QualType QTy) {
3240 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3241 int Size = DataLayout.getTypeStoreSize(ATy);
3242 SmallVector<uint64_t, 16> Bits(Size);
3243 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3244
3245 // Clear each element of the LLVM array.
3246 int CharWidth = CGM.getContext().getCharWidth();
3247 int CharsPerElt =
3248 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3249 int MaskIndex = 0;
3250 llvm::Value *R = llvm::UndefValue::get(ATy);
3251 for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3252 uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3253 DataLayout.isBigEndian());
3254 MaskIndex += CharsPerElt;
3255 llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3256 llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3257 R = Builder.CreateInsertValue(R, T1, I);
3258 }
3259
3260 return R;
3261 }
3262
3263 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3264 bool EmitRetDbgLoc,
3265 SourceLocation EndLoc) {
3266 if (FI.isNoReturn()) {
3267 // Noreturn functions don't return.
3268 EmitUnreachable(EndLoc);
3269 return;
3270 }
3271
3272 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3273 // Naked functions don't have epilogues.
3274 Builder.CreateUnreachable();
3275 return;
3276 }
3277
3278 // Functions with no result always return void.
3279 if (!ReturnValue.isValid()) {
3280 Builder.CreateRetVoid();
3281 return;
3282 }
3283
3284 llvm::DebugLoc RetDbgLoc;
3285 llvm::Value *RV = nullptr;
3286 QualType RetTy = FI.getReturnType();
3287 const ABIArgInfo &RetAI = FI.getReturnInfo();
3288
3289 switch (RetAI.getKind()) {
3290 case ABIArgInfo::InAlloca:
3291 // Aggregates get evaluated directly into the destination. Sometimes we
3292 // need to return the sret value in a register, though.
3293 assert(hasAggregateEvaluationKind(RetTy));
3294 if (RetAI.getInAllocaSRet()) {
3295 llvm::Function::arg_iterator EI = CurFn->arg_end();
3296 --EI;
3297 llvm::Value *ArgStruct = &*EI;
3298 llvm::Value *SRet = Builder.CreateStructGEP(
3299 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
3300 RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
3301 }
3302 break;
3303
3304 case ABIArgInfo::Indirect: {
3305 auto AI = CurFn->arg_begin();
3306 if (RetAI.isSRetAfterThis())
3307 ++AI;
3308 switch (getEvaluationKind(RetTy)) {
3309 case TEK_Complex: {
3310 ComplexPairTy RT =
3311 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3312 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3313 /*isInit*/ true);
3314 break;
3315 }
3316 case TEK_Aggregate:
3317 // Do nothing; aggregates get evaluated directly into the destination.
3318 break;
3319 case TEK_Scalar:
3320 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
3321 MakeNaturalAlignAddrLValue(&*AI, RetTy),
3322 /*isInit*/ true);
3323 break;
3324 }
3325 break;
3326 }
3327
3328 case ABIArgInfo::Extend:
3329 case ABIArgInfo::Direct:
3330 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3331 RetAI.getDirectOffset() == 0) {
3332 // The internal return value temp will always have pointer-to-return-type
3333 // type; just do a load.
3334
3335 // If there is a dominating store to ReturnValue, we can elide
3336 // the load, zap the store, and usually zap the alloca.
3337 if (llvm::StoreInst *SI =
3338 findDominatingStoreToReturnValue(*this)) {
3339 // Reuse the debug location from the store unless there is
3340 // cleanup code to be emitted between the store and return
3341 // instruction.
3342 if (EmitRetDbgLoc && !AutoreleaseResult)
3343 RetDbgLoc = SI->getDebugLoc();
3344 // Get the stored value and nuke the now-dead store.
3345 RV = SI->getValueOperand();
3346 SI->eraseFromParent();
3347
3348 // Otherwise, we have to do a simple load.
3349 } else {
3350 RV = Builder.CreateLoad(ReturnValue);
3351 }
3352 } else {
3353 // If the value is offset in memory, apply the offset now.
3354 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3355
3356 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3357 }
3358
3359 // In ARC, end functions that return a retainable type with a call
3360 // to objc_autoreleaseReturnValue.
3361 if (AutoreleaseResult) {
3362 #ifndef NDEBUG
3363 // Type::isObjCRetainableType has to be called on a QualType that hasn't
3364 // been stripped of the typedefs, so we cannot use RetTy here. Get the
3365 // original return type from the FunctionDecl, ObjCMethodDecl, or
3366 // BlockDecl, via CurCodeDecl or BlockInfo.
3367 QualType RT;
3368
3369 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3370 RT = FD->getReturnType();
3371 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3372 RT = MD->getReturnType();
3373 else if (isa<BlockDecl>(CurCodeDecl))
3374 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3375 else
3376 llvm_unreachable("Unexpected function/method type");
3377
3378 assert(getLangOpts().ObjCAutoRefCount &&
3379 !FI.isReturnsRetained() &&
3380 RT->isObjCRetainableType());
3381 #endif
3382 RV = emitAutoreleaseOfResult(*this, RV);
3383 }
3384
3385 break;
3386
3387 case ABIArgInfo::Ignore:
3388 break;
3389
3390 case ABIArgInfo::CoerceAndExpand: {
3391 auto coercionType = RetAI.getCoerceAndExpandType();
3392
3393 // Load all of the coerced elements out into results.
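// For illustration, a coercion type of { i64, [4 x i8], i32 } (where the
// [4 x i8] element is padding) produces two loads here and, below, a
// { i64, i32 } aggregate assembled with insertvalue.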
3394 llvm::SmallVector<llvm::Value*, 4> results; 3395 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType); 3396 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 3397 auto coercedEltType = coercionType->getElementType(i); 3398 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType)) 3399 continue; 3400 3401 auto eltAddr = Builder.CreateStructGEP(addr, i); 3402 auto elt = Builder.CreateLoad(eltAddr); 3403 results.push_back(elt); 3404 } 3405 3406 // If we have one result, it's the single direct result type. 3407 if (results.size() == 1) { 3408 RV = results[0]; 3409 3410 // Otherwise, we need to make a first-class aggregate. 3411 } else { 3412 // Construct a return type that lacks padding elements. 3413 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType(); 3414 3415 RV = llvm::UndefValue::get(returnType); 3416 for (unsigned i = 0, e = results.size(); i != e; ++i) { 3417 RV = Builder.CreateInsertValue(RV, results[i], i); 3418 } 3419 } 3420 break; 3421 } 3422 case ABIArgInfo::Expand: 3423 case ABIArgInfo::IndirectAliased: 3424 llvm_unreachable("Invalid ABI kind for return argument"); 3425 } 3426 3427 llvm::Instruction *Ret; 3428 if (RV) { 3429 if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) { 3430 // For certain return types, clear padding bits, as they may reveal 3431 // sensitive information. 3432 // Small struct/union types are passed as integers. 3433 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType()); 3434 if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType())) 3435 RV = EmitCMSEClearRecord(RV, ITy, RetTy); 3436 } 3437 EmitReturnValueCheck(RV); 3438 Ret = Builder.CreateRet(RV); 3439 } else { 3440 Ret = Builder.CreateRetVoid(); 3441 } 3442 3443 if (RetDbgLoc) 3444 Ret->setDebugLoc(std::move(RetDbgLoc)); 3445 } 3446 3447 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) { 3448 // A current decl may not be available when emitting vtable thunks. 3449 if (!CurCodeDecl) 3450 return; 3451 3452 // If the return block isn't reachable, neither is this check, so don't emit 3453 // it. 3454 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) 3455 return; 3456 3457 ReturnsNonNullAttr *RetNNAttr = nullptr; 3458 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) 3459 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>(); 3460 3461 if (!RetNNAttr && !requiresReturnValueNullabilityCheck()) 3462 return; 3463 3464 // Prefer the returns_nonnull attribute if it's present. 3465 SourceLocation AttrLoc; 3466 SanitizerMask CheckKind; 3467 SanitizerHandler Handler; 3468 if (RetNNAttr) { 3469 assert(!requiresReturnValueNullabilityCheck() && 3470 "Cannot check nullability and the nonnull attribute"); 3471 AttrLoc = RetNNAttr->getLocation(); 3472 CheckKind = SanitizerKind::ReturnsNonnullAttribute; 3473 Handler = SanitizerHandler::NonnullReturn; 3474 } else { 3475 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl)) 3476 if (auto *TSI = DD->getTypeSourceInfo()) 3477 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>()) 3478 AttrLoc = FTL.getReturnLoc().findNullabilityLoc(); 3479 CheckKind = SanitizerKind::NullabilityReturn; 3480 Handler = SanitizerHandler::NullabilityReturn; 3481 } 3482 3483 SanitizerScope SanScope(this); 3484 3485 // Make sure the "return" source location is valid. If we're checking a 3486 // nullability annotation, make sure the preconditions for the check are met. 
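// The emitted control flow is roughly:
//   %sloc = load the return source-location slot
//   %can.check = icmp ne %sloc, null  (and'ed with the nullability
//                                      precondition, if required)
//   br i1 %can.check, label %nullcheck, label %no.nullcheck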
3487 llvm::BasicBlock *Check = createBasicBlock("nullcheck"); 3488 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck"); 3489 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load"); 3490 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr); 3491 if (requiresReturnValueNullabilityCheck()) 3492 CanNullCheck = 3493 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition); 3494 Builder.CreateCondBr(CanNullCheck, Check, NoCheck); 3495 EmitBlock(Check); 3496 3497 // Now do the null check. 3498 llvm::Value *Cond = Builder.CreateIsNotNull(RV); 3499 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)}; 3500 llvm::Value *DynamicData[] = {SLocPtr}; 3501 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData); 3502 3503 EmitBlock(NoCheck); 3504 3505 #ifndef NDEBUG 3506 // The return location should not be used after the check has been emitted. 3507 ReturnLocation = Address::invalid(); 3508 #endif 3509 } 3510 3511 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 3512 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 3513 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 3514 } 3515 3516 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, 3517 QualType Ty) { 3518 // FIXME: Generate IR in one pass, rather than going back and fixing up these 3519 // placeholders. 3520 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 3521 llvm::Type *IRPtrTy = IRTy->getPointerTo(); 3522 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo()); 3523 3524 // FIXME: When we generate this IR in one pass, we shouldn't need 3525 // this win32-specific alignment hack. 3526 CharUnits Align = CharUnits::fromQuantity(4); 3527 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align); 3528 3529 return AggValueSlot::forAddr(Address(Placeholder, Align), 3530 Ty.getQualifiers(), 3531 AggValueSlot::IsNotDestructed, 3532 AggValueSlot::DoesNotNeedGCBarriers, 3533 AggValueSlot::IsNotAliased, 3534 AggValueSlot::DoesNotOverlap); 3535 } 3536 3537 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 3538 const VarDecl *param, 3539 SourceLocation loc) { 3540 // StartFunction converted the ABI-lowered parameter(s) into a 3541 // local alloca. We need to turn that into an r-value suitable 3542 // for EmitCall. 3543 Address local = GetAddrOfLocalVar(param); 3544 3545 QualType type = param->getType(); 3546 3547 if (isInAllocaArgument(CGM.getCXXABI(), type)) { 3548 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter"); 3549 } 3550 3551 // GetAddrOfLocalVar returns a pointer-to-pointer for references, 3552 // but the argument needs to be the original pointer. 3553 if (type->isReferenceType()) { 3554 args.add(RValue::get(Builder.CreateLoad(local)), type); 3555 3556 // In ARC, move out of consumed arguments so that the release cleanup 3557 // entered by StartFunction doesn't cause an over-release. This isn't 3558 // optimal -O0 code generation, but it should get cleaned up when 3559 // optimization is enabled. This also assumes that delegate calls are 3560 // performed exactly once for a set of arguments, but that should be safe. 
3561 } else if (getLangOpts().ObjCAutoRefCount && 3562 param->hasAttr<NSConsumedAttr>() && 3563 type->isObjCRetainableType()) { 3564 llvm::Value *ptr = Builder.CreateLoad(local); 3565 auto null = 3566 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType())); 3567 Builder.CreateStore(null, local); 3568 args.add(RValue::get(ptr), type); 3569 3570 // For the most part, we just need to load the alloca, except that 3571 // aggregate r-values are actually pointers to temporaries. 3572 } else { 3573 args.add(convertTempToRValue(local, type, loc), type); 3574 } 3575 3576 // Deactivate the cleanup for the callee-destructed param that was pushed. 3577 if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk && 3578 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() && 3579 param->needsDestruction(getContext())) { 3580 EHScopeStack::stable_iterator cleanup = 3581 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param)); 3582 assert(cleanup.isValid() && 3583 "cleanup for callee-destructed param not recorded"); 3584 // This unreachable is a temporary marker which will be removed later. 3585 llvm::Instruction *isActive = Builder.CreateUnreachable(); 3586 args.addArgCleanupDeactivation(cleanup, isActive); 3587 } 3588 } 3589 3590 static bool isProvablyNull(llvm::Value *addr) { 3591 return isa<llvm::ConstantPointerNull>(addr); 3592 } 3593 3594 /// Emit the actual writing-back of a writeback. 3595 static void emitWriteback(CodeGenFunction &CGF, 3596 const CallArgList::Writeback &writeback) { 3597 const LValue &srcLV = writeback.Source; 3598 Address srcAddr = srcLV.getAddress(CGF); 3599 assert(!isProvablyNull(srcAddr.getPointer()) && 3600 "shouldn't have writeback for provably null argument"); 3601 3602 llvm::BasicBlock *contBB = nullptr; 3603 3604 // If the argument wasn't provably non-null, we need to null check 3605 // before doing the store. 3606 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3607 CGF.CGM.getDataLayout()); 3608 if (!provablyNonNull) { 3609 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 3610 contBB = CGF.createBasicBlock("icr.done"); 3611 3612 llvm::Value *isNull = 3613 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3614 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 3615 CGF.EmitBlock(writebackBB); 3616 } 3617 3618 // Load the value to writeback. 3619 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 3620 3621 // Cast it back, in case we're writing an id to a Foo* or something. 3622 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(), 3623 "icr.writeback-cast"); 3624 3625 // Perform the writeback. 3626 3627 // If we have a "to use" value, it's something we need to emit a use 3628 // of. This has to be carefully threaded in: if it's done after the 3629 // release it's potentially undefined behavior (and the optimizer 3630 // will ignore it), and if it happens before the retain then the 3631 // optimizer could move the release there. 3632 if (writeback.ToUse) { 3633 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 3634 3635 // Retain the new value. No need to block-copy here: the block's 3636 // being passed up the stack. 3637 value = CGF.EmitARCRetainNonBlock(value); 3638 3639 // Emit the intrinsic use here. 3640 CGF.EmitARCIntrinsicUse(writeback.ToUse); 3641 3642 // Load the old value (primitively). 3643 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 3644 3645 // Put the new value in place (primitively). 
3646 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 3647 3648 // Release the old value. 3649 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 3650 3651 // Otherwise, we can just do a normal lvalue store. 3652 } else { 3653 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 3654 } 3655 3656 // Jump to the continuation block. 3657 if (!provablyNonNull) 3658 CGF.EmitBlock(contBB); 3659 } 3660 3661 static void emitWritebacks(CodeGenFunction &CGF, 3662 const CallArgList &args) { 3663 for (const auto &I : args.writebacks()) 3664 emitWriteback(CGF, I); 3665 } 3666 3667 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, 3668 const CallArgList &CallArgs) { 3669 ArrayRef<CallArgList::CallArgCleanup> Cleanups = 3670 CallArgs.getCleanupsToDeactivate(); 3671 // Iterate in reverse to increase the likelihood of popping the cleanup. 3672 for (const auto &I : llvm::reverse(Cleanups)) { 3673 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP); 3674 I.IsActiveIP->eraseFromParent(); 3675 } 3676 } 3677 3678 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { 3679 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) 3680 if (uop->getOpcode() == UO_AddrOf) 3681 return uop->getSubExpr(); 3682 return nullptr; 3683 } 3684 3685 /// Emit an argument that's being passed call-by-writeback. That is, 3686 /// we are passing the address of an __autoreleased temporary; it 3687 /// might be copy-initialized with the current value of the given 3688 /// address, but it will definitely be copied out of after the call. 3689 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, 3690 const ObjCIndirectCopyRestoreExpr *CRE) { 3691 LValue srcLV; 3692 3693 // Make an optimistic effort to emit the address as an l-value. 3694 // This can fail if the argument expression is more complicated. 3695 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { 3696 srcLV = CGF.EmitLValue(lvExpr); 3697 3698 // Otherwise, just emit it as a scalar. 3699 } else { 3700 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); 3701 3702 QualType srcAddrType = 3703 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); 3704 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); 3705 } 3706 Address srcAddr = srcLV.getAddress(CGF); 3707 3708 // The dest and src types don't necessarily match in LLVM terms 3709 // because of the crazy ObjC compatibility rules. 3710 3711 llvm::PointerType *destType = 3712 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); 3713 3714 // If the address is a constant null, just pass the appropriate null. 3715 if (isProvablyNull(srcAddr.getPointer())) { 3716 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), 3717 CRE->getType()); 3718 return; 3719 } 3720 3721 // Create the temporary. 3722 Address temp = CGF.CreateTempAlloca(destType->getElementType(), 3723 CGF.getPointerAlign(), 3724 "icr.temp"); 3725 // Loading an l-value can introduce a cleanup if the l-value is __weak, 3726 // and that cleanup will be conditional if we can't prove that the l-value 3727 // isn't null, so we need to register a dominating point so that the cleanups 3728 // system will make valid IR. 3729 CodeGenFunction::ConditionalEvaluation condEval(CGF); 3730 3731 // Zero-initialize it if we're not doing a copy-initialization. 
3732 bool shouldCopy = CRE->shouldCopy(); 3733 if (!shouldCopy) { 3734 llvm::Value *null = 3735 llvm::ConstantPointerNull::get( 3736 cast<llvm::PointerType>(destType->getElementType())); 3737 CGF.Builder.CreateStore(null, temp); 3738 } 3739 3740 llvm::BasicBlock *contBB = nullptr; 3741 llvm::BasicBlock *originBB = nullptr; 3742 3743 // If the address is *not* known to be non-null, we need to switch. 3744 llvm::Value *finalArgument; 3745 3746 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3747 CGF.CGM.getDataLayout()); 3748 if (provablyNonNull) { 3749 finalArgument = temp.getPointer(); 3750 } else { 3751 llvm::Value *isNull = 3752 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3753 3754 finalArgument = CGF.Builder.CreateSelect(isNull, 3755 llvm::ConstantPointerNull::get(destType), 3756 temp.getPointer(), "icr.argument"); 3757 3758 // If we need to copy, then the load has to be conditional, which 3759 // means we need control flow. 3760 if (shouldCopy) { 3761 originBB = CGF.Builder.GetInsertBlock(); 3762 contBB = CGF.createBasicBlock("icr.cont"); 3763 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 3764 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 3765 CGF.EmitBlock(copyBB); 3766 condEval.begin(CGF); 3767 } 3768 } 3769 3770 llvm::Value *valueToUse = nullptr; 3771 3772 // Perform a copy if necessary. 3773 if (shouldCopy) { 3774 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 3775 assert(srcRV.isScalar()); 3776 3777 llvm::Value *src = srcRV.getScalarVal(); 3778 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 3779 "icr.cast"); 3780 3781 // Use an ordinary store, not a store-to-lvalue. 3782 CGF.Builder.CreateStore(src, temp); 3783 3784 // If optimization is enabled, and the value was held in a 3785 // __strong variable, we need to tell the optimizer that this 3786 // value has to stay alive until we're doing the store back. 3787 // This is because the temporary is effectively unretained, 3788 // and so otherwise we can violate the high-level semantics. 3789 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 3790 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 3791 valueToUse = src; 3792 } 3793 } 3794 3795 // Finish the control flow if we needed it. 3796 if (shouldCopy && !provablyNonNull) { 3797 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 3798 CGF.EmitBlock(contBB); 3799 3800 // Make a phi for the value to intrinsically use. 3801 if (valueToUse) { 3802 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 3803 "icr.to-use"); 3804 phiToUse->addIncoming(valueToUse, copyBB); 3805 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 3806 originBB); 3807 valueToUse = phiToUse; 3808 } 3809 3810 condEval.end(CGF); 3811 } 3812 3813 args.addWriteback(srcLV, temp, valueToUse); 3814 args.add(RValue::get(finalArgument), CRE->getType()); 3815 } 3816 3817 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 3818 assert(!StackBase); 3819 3820 // Save the stack. 3821 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 3822 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); 3823 } 3824 3825 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 3826 if (StackBase) { 3827 // Restore the stack after the call. 
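// This pairs with the @llvm.stacksave emitted by allocateArgumentMemory
// above, so the inalloca argument block is popped once the call returns:
//   %inalloca.save = call i8* @llvm.stacksave()
//   ... call using the inalloca argument memory ...
//   call void @llvm.stackrestore(i8* %inalloca.save)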
3828 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 3829 CGF.Builder.CreateCall(F, StackBase); 3830 } 3831 } 3832 3833 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, 3834 SourceLocation ArgLoc, 3835 AbstractCallee AC, 3836 unsigned ParmNum) { 3837 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || 3838 SanOpts.has(SanitizerKind::NullabilityArg))) 3839 return; 3840 3841 // The param decl may be missing in a variadic function. 3842 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr; 3843 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; 3844 3845 // Prefer the nonnull attribute if it's present. 3846 const NonNullAttr *NNAttr = nullptr; 3847 if (SanOpts.has(SanitizerKind::NonnullAttribute)) 3848 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo); 3849 3850 bool CanCheckNullability = false; 3851 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) { 3852 auto Nullability = PVD->getType()->getNullability(getContext()); 3853 CanCheckNullability = Nullability && 3854 *Nullability == NullabilityKind::NonNull && 3855 PVD->getTypeSourceInfo(); 3856 } 3857 3858 if (!NNAttr && !CanCheckNullability) 3859 return; 3860 3861 SourceLocation AttrLoc; 3862 SanitizerMask CheckKind; 3863 SanitizerHandler Handler; 3864 if (NNAttr) { 3865 AttrLoc = NNAttr->getLocation(); 3866 CheckKind = SanitizerKind::NonnullAttribute; 3867 Handler = SanitizerHandler::NonnullArg; 3868 } else { 3869 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc(); 3870 CheckKind = SanitizerKind::NullabilityArg; 3871 Handler = SanitizerHandler::NullabilityArg; 3872 } 3873 3874 SanitizerScope SanScope(this); 3875 llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType); 3876 llvm::Constant *StaticData[] = { 3877 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc), 3878 llvm::ConstantInt::get(Int32Ty, ArgNo + 1), 3879 }; 3880 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None); 3881 } 3882 3883 // Check if the call is going to use the inalloca convention. This needs to 3884 // agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged 3885 // later, so we can't check it directly. 3886 static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, 3887 ArrayRef<QualType> ArgTypes) { 3888 // The Swift calling convention doesn't go through the target-specific 3889 // argument classification, so it never uses inalloca. 3890 // TODO: Consider limiting inalloca use to only calling conventions supported 3891 // by MSVC. 3892 if (ExplicitCC == CC_Swift) 3893 return false; 3894 if (!CGM.getTarget().getCXXABI().isMicrosoft()) 3895 return false; 3896 return llvm::any_of(ArgTypes, [&](QualType Ty) { 3897 return isInAllocaArgument(CGM.getCXXABI(), Ty); 3898 }); 3899 } 3900 3901 #ifndef NDEBUG 3902 // Determine whether the given argument is an Objective-C method 3903 // that may have type parameters in its signature. 3904 static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) { 3905 const DeclContext *dc = method->getDeclContext(); 3906 if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) { 3907 return classDecl->getTypeParamListAsWritten(); 3908 } 3909 3910 if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) { 3911 return catDecl->getTypeParamList(); 3912 } 3913 3914 return false; 3915 } 3916 #endif 3917 3918 /// EmitCallArgs - Emit call arguments for a function. 
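/// Argument evaluation order depends on the target C++ ABI and on \p Order:
/// under the MS C++ ABI arguments are normally evaluated right-to-left so
/// that callee-destroyed arguments are destroyed in reverse construction
/// order; see the LeftToRight logic below.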
3919 void CodeGenFunction::EmitCallArgs( 3920 CallArgList &Args, PrototypeWrapper Prototype, 3921 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, 3922 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { 3923 SmallVector<QualType, 16> ArgTypes; 3924 3925 assert((ParamsToSkip == 0 || Prototype.P) && 3926 "Can't skip parameters if type info is not provided"); 3927 3928 // This variable only captures *explicitly* written conventions, not those 3929 // applied by default via command line flags or target defaults, such as 3930 // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would 3931 // require knowing if this is a C++ instance method or being able to see 3932 // unprototyped FunctionTypes. 3933 CallingConv ExplicitCC = CC_C; 3934 3935 // First, if a prototype was provided, use those argument types. 3936 bool IsVariadic = false; 3937 if (Prototype.P) { 3938 const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>(); 3939 if (MD) { 3940 IsVariadic = MD->isVariadic(); 3941 ExplicitCC = getCallingConventionForDecl( 3942 MD, CGM.getTarget().getTriple().isOSWindows()); 3943 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip, 3944 MD->param_type_end()); 3945 } else { 3946 const auto *FPT = Prototype.P.get<const FunctionProtoType *>(); 3947 IsVariadic = FPT->isVariadic(); 3948 ExplicitCC = FPT->getExtInfo().getCC(); 3949 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip, 3950 FPT->param_type_end()); 3951 } 3952 3953 #ifndef NDEBUG 3954 // Check that the prototyped types match the argument expression types. 3955 bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD); 3956 CallExpr::const_arg_iterator Arg = ArgRange.begin(); 3957 for (QualType Ty : ArgTypes) { 3958 assert(Arg != ArgRange.end() && "Running over edge of argument list!"); 3959 assert( 3960 (isGenericMethod || Ty->isVariablyModifiedType() || 3961 Ty.getNonReferenceType()->isObjCRetainableType() || 3962 getContext() 3963 .getCanonicalType(Ty.getNonReferenceType()) 3964 .getTypePtr() == 3965 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) && 3966 "type mismatch in call argument!"); 3967 ++Arg; 3968 } 3969 3970 // Either we've emitted all the call args, or we have a call to variadic 3971 // function. 3972 assert((Arg == ArgRange.end() || IsVariadic) && 3973 "Extra arguments in non-variadic function!"); 3974 #endif 3975 } 3976 3977 // If we still have any arguments, emit them using the type of the argument. 3978 for (auto *A : llvm::make_range(std::next(ArgRange.begin(), ArgTypes.size()), 3979 ArgRange.end())) 3980 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType()); 3981 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); 3982 3983 // We must evaluate arguments from right to left in the MS C++ ABI, 3984 // because arguments are destroyed left to right in the callee. As a special 3985 // case, there are certain language constructs that require left-to-right 3986 // evaluation, and in those cases we consider the evaluation order requirement 3987 // to trump the "destruction order is reverse construction order" guarantee. 3988 bool LeftToRight = 3989 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee() 3990 ? 
Order == EvaluationOrder::ForceLeftToRight
3991 : Order != EvaluationOrder::ForceRightToLeft;
3992
3993 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3994 RValue EmittedArg) {
3995 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3996 return;
3997 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3998 if (PS == nullptr)
3999 return;
4000
4001 const auto &Context = getContext();
4002 auto SizeTy = Context.getSizeType();
4003 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
4004 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
4005 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
4006 EmittedArg.getScalarVal(),
4007 PS->isDynamic());
4008 Args.add(RValue::get(V), SizeTy);
4009 // If we're emitting args in reverse, be sure to do so with
4010 // pass_object_size, as well.
4011 if (!LeftToRight)
4012 std::swap(Args.back(), *(&Args.back() - 1));
4013 };
4014
4015 // Insert a stack save if we're going to need any inalloca args.
4016 if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
4017 assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4018 "inalloca only supported on x86");
4019 Args.allocateArgumentMemory(*this);
4020 }
4021
4022 // Evaluate each argument in the appropriate order.
4023 size_t CallArgsStart = Args.size();
4024 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4025 unsigned Idx = LeftToRight ? I : E - I - 1;
4026 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
4027 unsigned InitialArgSize = Args.size();
4028 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
4029 // the argument and parameter match or the objc method is parameterized.
4030 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4031 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4032 ArgTypes[Idx]) ||
4033 (isa<ObjCMethodDecl>(AC.getDecl()) &&
4034 isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
4035 "Argument and parameter types don't match");
4036 EmitCallArg(Args, *Arg, ArgTypes[Idx]);
4037 // In particular, we depend on it being the last arg in Args, and the
4038 // objectsize bits depend on there only being one arg if !LeftToRight.
4039 assert(InitialArgSize + 1 == Args.size() &&
4040 "The code below depends on only adding one arg per EmitCallArg");
4041 (void)InitialArgSize;
4042 // Since pointer arguments are never emitted as LValues, it is safe to emit
4043 // the non-null argument check for r-values only.
4044 if (!Args.back().hasLValue()) {
4045 RValue RVArg = Args.back().getKnownRValue();
4046 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
4047 ParamsToSkip + Idx);
4048 // @llvm.objectsize should never have side-effects and shouldn't need
4049 // destruction/cleanups, so we can safely "emit" it after its arg,
4050 // regardless of right-to-leftness.
4051 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4052 }
4053 }
4054
4055 if (!LeftToRight) {
4056 // Un-reverse the arguments we just evaluated so they match up with the LLVM
4057 // IR function.
4058 std::reverse(Args.begin() + CallArgsStart, Args.end()); 4059 } 4060 } 4061 4062 namespace { 4063 4064 struct DestroyUnpassedArg final : EHScopeStack::Cleanup { 4065 DestroyUnpassedArg(Address Addr, QualType Ty) 4066 : Addr(Addr), Ty(Ty) {} 4067 4068 Address Addr; 4069 QualType Ty; 4070 4071 void Emit(CodeGenFunction &CGF, Flags flags) override { 4072 QualType::DestructionKind DtorKind = Ty.isDestructedType(); 4073 if (DtorKind == QualType::DK_cxx_destructor) { 4074 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 4075 assert(!Dtor->isTrivial()); 4076 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 4077 /*Delegating=*/false, Addr, Ty); 4078 } else { 4079 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty)); 4080 } 4081 } 4082 }; 4083 4084 struct DisableDebugLocationUpdates { 4085 CodeGenFunction &CGF; 4086 bool disabledDebugInfo; 4087 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { 4088 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) 4089 CGF.disableDebugInfo(); 4090 } 4091 ~DisableDebugLocationUpdates() { 4092 if (disabledDebugInfo) 4093 CGF.enableDebugInfo(); 4094 } 4095 }; 4096 4097 } // end anonymous namespace 4098 4099 RValue CallArg::getRValue(CodeGenFunction &CGF) const { 4100 if (!HasLV) 4101 return RV; 4102 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty); 4103 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, 4104 LV.isVolatile()); 4105 IsUsed = true; 4106 return RValue::getAggregate(Copy.getAddress(CGF)); 4107 } 4108 4109 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const { 4110 LValue Dst = CGF.MakeAddrLValue(Addr, Ty); 4111 if (!HasLV && RV.isScalar()) 4112 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true); 4113 else if (!HasLV && RV.isComplex()) 4114 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true); 4115 else { 4116 auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress(); 4117 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty); 4118 // We assume that call args are never copied into subobjects. 4119 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap, 4120 HasLV ? LV.isVolatileQualified() 4121 : RV.isVolatileQualified()); 4122 } 4123 IsUsed = true; 4124 } 4125 4126 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 4127 QualType type) { 4128 DisableDebugLocationUpdates Dis(*this, E); 4129 if (const ObjCIndirectCopyRestoreExpr *CRE 4130 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 4131 assert(getLangOpts().ObjCAutoRefCount); 4132 return emitWritebackArg(*this, args, CRE); 4133 } 4134 4135 assert(type->isReferenceType() == E->isGLValue() && 4136 "reference binding to unmaterialized r-value!"); 4137 4138 if (E->isGLValue()) { 4139 assert(E->getObjectKind() == OK_Ordinary); 4140 return args.add(EmitReferenceBindingToExpr(E), type); 4141 } 4142 4143 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 4144 4145 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 4146 // However, we still have to push an EH-only cleanup in case we unwind before 4147 // we make it to the call. 4148 if (HasAggregateEvalKind && 4149 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) { 4150 // If we're using inalloca, use the argument memory. Otherwise, use a 4151 // temporary. 
4152 AggValueSlot Slot; 4153 if (args.isUsingInAlloca()) 4154 Slot = createPlaceholderSlot(*this, type); 4155 else 4156 Slot = CreateAggTemp(type, "agg.tmp"); 4157 4158 bool DestroyedInCallee = true, NeedsEHCleanup = true; 4159 if (const auto *RD = type->getAsCXXRecordDecl()) 4160 DestroyedInCallee = RD->hasNonTrivialDestructor(); 4161 else 4162 NeedsEHCleanup = needsEHCleanup(type.isDestructedType()); 4163 4164 if (DestroyedInCallee) 4165 Slot.setExternallyDestructed(); 4166 4167 EmitAggExpr(E, Slot); 4168 RValue RV = Slot.asRValue(); 4169 args.add(RV, type); 4170 4171 if (DestroyedInCallee && NeedsEHCleanup) { 4172 // Create a no-op GEP between the placeholder and the cleanup so we can 4173 // RAUW it successfully. It also serves as a marker of the first 4174 // instruction where the cleanup is active. 4175 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(), 4176 type); 4177 // This unreachable is a temporary marker which will be removed later. 4178 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 4179 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 4180 } 4181 return; 4182 } 4183 4184 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 4185 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 4186 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 4187 assert(L.isSimple()); 4188 args.addUncopiedAggregate(L, type); 4189 return; 4190 } 4191 4192 args.add(EmitAnyExprToTemp(E), type); 4193 } 4194 4195 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 4196 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 4197 // implicitly widens null pointer constants that are arguments to varargs 4198 // functions to pointer-sized ints. 4199 if (!getTarget().getTriple().isOSWindows()) 4200 return Arg->getType(); 4201 4202 if (Arg->getType()->isIntegerType() && 4203 getContext().getTypeSize(Arg->getType()) < 4204 getContext().getTargetInfo().getPointerWidth(0) && 4205 Arg->isNullPointerConstant(getContext(), 4206 Expr::NPC_ValueDependentIsNotNull)) { 4207 return getContext().getIntPtrType(); 4208 } 4209 4210 return Arg->getType(); 4211 } 4212 4213 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 4214 // optimizer it can aggressively ignore unwind edges. 4215 void 4216 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 4217 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 4218 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 4219 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 4220 CGM.getNoObjCARCExceptionsMetadata()); 4221 } 4222 4223 /// Emits a call to the given no-arguments nounwind runtime function. 4224 llvm::CallInst * 4225 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, 4226 const llvm::Twine &name) { 4227 return EmitNounwindRuntimeCall(callee, None, name); 4228 } 4229 4230 /// Emits a call to the given nounwind runtime function. 4231 llvm::CallInst * 4232 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, 4233 ArrayRef<llvm::Value *> args, 4234 const llvm::Twine &name) { 4235 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 4236 call->setDoesNotThrow(); 4237 return call; 4238 } 4239 4240 /// Emits a simple call (never an invoke) to the given no-arguments 4241 /// runtime function. 
4242 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, 4243 const llvm::Twine &name) { 4244 return EmitRuntimeCall(callee, None, name); 4245 } 4246 4247 // Calls which may throw must have operand bundles indicating which funclet 4248 // they are nested within. 4249 SmallVector<llvm::OperandBundleDef, 1> 4250 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) { 4251 SmallVector<llvm::OperandBundleDef, 1> BundleList; 4252 // There is no need for a funclet operand bundle if we aren't inside a 4253 // funclet. 4254 if (!CurrentFuncletPad) 4255 return BundleList; 4256 4257 // Skip intrinsics which cannot throw. 4258 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts()); 4259 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) 4260 return BundleList; 4261 4262 BundleList.emplace_back("funclet", CurrentFuncletPad); 4263 return BundleList; 4264 } 4265 4266 /// Emits a simple call (never an invoke) to the given runtime function. 4267 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, 4268 ArrayRef<llvm::Value *> args, 4269 const llvm::Twine &name) { 4270 llvm::CallInst *call = Builder.CreateCall( 4271 callee, args, getBundlesForFunclet(callee.getCallee()), name); 4272 call->setCallingConv(getRuntimeCC()); 4273 return call; 4274 } 4275 4276 /// Emits a call or invoke to the given noreturn runtime function. 4277 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke( 4278 llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) { 4279 SmallVector<llvm::OperandBundleDef, 1> BundleList = 4280 getBundlesForFunclet(callee.getCallee()); 4281 4282 if (getInvokeDest()) { 4283 llvm::InvokeInst *invoke = 4284 Builder.CreateInvoke(callee, 4285 getUnreachableBlock(), 4286 getInvokeDest(), 4287 args, 4288 BundleList); 4289 invoke->setDoesNotReturn(); 4290 invoke->setCallingConv(getRuntimeCC()); 4291 } else { 4292 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); 4293 call->setDoesNotReturn(); 4294 call->setCallingConv(getRuntimeCC()); 4295 Builder.CreateUnreachable(); 4296 } 4297 } 4298 4299 /// Emits a call or invoke instruction to the given nullary runtime function. 4300 llvm::CallBase * 4301 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, 4302 const Twine &name) { 4303 return EmitRuntimeCallOrInvoke(callee, None, name); 4304 } 4305 4306 /// Emits a call or invoke instruction to the given runtime function. 4307 llvm::CallBase * 4308 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, 4309 ArrayRef<llvm::Value *> args, 4310 const Twine &name) { 4311 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name); 4312 call->setCallingConv(getRuntimeCC()); 4313 return call; 4314 } 4315 4316 /// Emits a call or invoke instruction to the given function, depending 4317 /// on the current state of the EH stack. 
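/// For illustration, inside a scope with an active EH cleanup this emits
///   invoke void @f(...) to label %invoke.cont unwind label %lpad
/// (codegen then continues in %invoke.cont); otherwise it emits a plain call.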
4318 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
4319 ArrayRef<llvm::Value *> Args,
4320 const Twine &Name) {
4321 llvm::BasicBlock *InvokeDest = getInvokeDest();
4322 SmallVector<llvm::OperandBundleDef, 1> BundleList =
4323 getBundlesForFunclet(Callee.getCallee());
4324
4325 llvm::CallBase *Inst;
4326 if (!InvokeDest)
4327 Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
4328 else {
4329 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
4330 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4331 Name);
4332 EmitBlock(ContBB);
4333 }
4334
4335 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4336 // optimizer it can aggressively ignore unwind edges.
4337 if (CGM.getLangOpts().ObjCAutoRefCount)
4338 AddObjCARCExceptionMetadata(Inst);
4339
4340 return Inst;
4341 }
4342
4343 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4344 llvm::Value *New) {
4345 DeferredReplacements.push_back(std::make_pair(Old, New));
4346 }
4347
4348 namespace {
4349
4350 /// Specify the given \p NewAlign as the alignment of the return-value
4351 /// attribute. If such an attribute already exists, keep the larger of the two.
4352 LLVM_NODISCARD llvm::AttributeList
4353 maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4354 const llvm::AttributeList &Attrs,
4355 llvm::Align NewAlign) {
4356 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4357 if (CurAlign >= NewAlign)
4358 return Attrs;
4359 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4360 return Attrs
4361 .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex,
4362 llvm::Attribute::AttrKind::Alignment)
4363 .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr);
4364 }
4365
4366 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
4367 protected:
4368 CodeGenFunction &CGF;
4369
4370 /// We do nothing if this is, or becomes, nullptr.
4371 const AlignedAttrTy *AA = nullptr;
4372
4373 llvm::Value *Alignment = nullptr; // May or may not be a constant.
4374 llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
4375
4376 AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4377 : CGF(CGF_) {
4378 if (!FuncDecl)
4379 return;
4380 AA = FuncDecl->getAttr<AlignedAttrTy>();
4381 }
4382
4383 public:
4384 /// If we can, materialize the alignment as an attribute on the return value.
4385 LLVM_NODISCARD llvm::AttributeList
4386 TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
4387 if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
4388 return Attrs;
4389 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4390 if (!AlignmentCI)
4391 return Attrs;
4392 // We may legitimately have non-power-of-2 alignment here.
4393 // If so, this is UB land; emit it via `@llvm.assume` instead.
4394 if (!AlignmentCI->getValue().isPowerOf2())
4395 return Attrs;
4396 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
4397 CGF.getLLVMContext(), Attrs,
4398 llvm::Align(
4399 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4400 AA = nullptr; // We're done. Disallow doing anything else.
4401 return NewAttrs;
4402 }
4403
4404 /// Emit the alignment assumption.
4405 /// This is a general fallback that we take if there is an offset, the
4406 /// alignment is variable, or we are sanitizing for alignment.
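/// A rough sketch of the fallback form, assuming the returned pointer is %p:
///   call void @llvm.assume(i1 true) [ "align"(i8* %p, i64 %align) ]
/// with an additional offset operand in the bundle when OffsetCI is non-null.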
4407 void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) { 4408 if (!AA) 4409 return; 4410 CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, 4411 AA->getLocation(), Alignment, OffsetCI); 4412 AA = nullptr; // We're done. Disallow doing anything else. 4413 } 4414 }; 4415 4416 /// Helper data structure to emit `AssumeAlignedAttr`. 4417 class AssumeAlignedAttrEmitter final 4418 : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> { 4419 public: 4420 AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) 4421 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { 4422 if (!AA) 4423 return; 4424 // It is guaranteed that the alignment/offset are constants. 4425 Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment())); 4426 if (Expr *Offset = AA->getOffset()) { 4427 OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset)); 4428 if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset. 4429 OffsetCI = nullptr; 4430 } 4431 } 4432 }; 4433 4434 /// Helper data structure to emit `AllocAlignAttr`. 4435 class AllocAlignAttrEmitter final 4436 : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> { 4437 public: 4438 AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl, 4439 const CallArgList &CallArgs) 4440 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { 4441 if (!AA) 4442 return; 4443 // Alignment may or may not be a constant, and that is okay. 4444 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()] 4445 .getRValue(CGF) 4446 .getScalarVal(); 4447 } 4448 }; 4449 4450 } // namespace 4451 4452 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 4453 const CGCallee &Callee, 4454 ReturnValueSlot ReturnValue, 4455 const CallArgList &CallArgs, 4456 llvm::CallBase **callOrInvoke, 4457 SourceLocation Loc) { 4458 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 4459 4460 assert(Callee.isOrdinary() || Callee.isVirtual()); 4461 4462 // Handle struct-return functions by passing a pointer to the 4463 // location that we would like to return into. 4464 QualType RetTy = CallInfo.getReturnType(); 4465 const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); 4466 4467 llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo); 4468 4469 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); 4470 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) { 4471 // We can only guarantee that a function is called from the correct 4472 // context/function based on the appropriate target attributes, 4473 // so only check in the case where we have both always_inline and target 4474 // since otherwise we could be making a conditional call after a check for 4475 // the proper cpu features (and it won't cause code generation issues due to 4476 // function based code generation). 4477 if (TargetDecl->hasAttr<AlwaysInlineAttr>() && 4478 TargetDecl->hasAttr<TargetAttr>()) 4479 checkTargetFeatures(Loc, FD); 4480 4481 // Some architectures (such as x86-64) have the ABI changed based on 4482 // attribute-target/features. Give them a chance to diagnose. 
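// (For example, x86-64 diagnoses passing a 256-bit vector argument to a
// function whose target features don't include AVX.)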
4483 CGM.getTargetCodeGenInfo().checkFunctionCallABI(
4484 CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
4485 }
4486
4487 #ifndef NDEBUG
4488 if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
4489 // For an inalloca varargs function, we don't expect CallInfo to match the
4490 // function pointer's type, because the inalloca struct will have extra
4491 // fields in it for the varargs parameters. Code later in this function
4492 // bitcasts the function pointer to the type derived from CallInfo.
4493 //
4494 // In other cases, we assert that the types match up (until pointers stop
4495 // having pointee types).
4496 llvm::Type *TypeFromVal;
4497 if (Callee.isVirtual())
4498 TypeFromVal = Callee.getVirtualFunctionType();
4499 else
4500 TypeFromVal =
4501 Callee.getFunctionPointer()->getType()->getPointerElementType();
4502 assert(IRFuncTy == TypeFromVal);
4503 }
4504 #endif
4505
4506 // 1. Set up the arguments.
4507
4508 // If we're using inalloca, insert the allocation after the stack save.
4509 // FIXME: Do this earlier rather than hacking it in here!
4510 Address ArgMemory = Address::invalid();
4511 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
4512 const llvm::DataLayout &DL = CGM.getDataLayout();
4513 llvm::Instruction *IP = CallArgs.getStackBase();
4514 llvm::AllocaInst *AI;
4515 if (IP) {
4516 IP = IP->getNextNode();
4517 AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
4518 "argmem", IP);
4519 } else {
4520 AI = CreateTempAlloca(ArgStruct, "argmem");
4521 }
4522 auto Align = CallInfo.getArgStructAlignment();
4523 AI->setAlignment(Align.getAsAlign());
4524 AI->setUsedWithInAlloca(true);
4525 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
4526 ArgMemory = Address(AI, Align);
4527 }
4528
4529 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
4530 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
4531
4532 // If the call returns a temporary with struct return, create a temporary
4533 // alloca to hold the result, unless one is given to us.
4534 Address SRetPtr = Address::invalid();
4535 Address SRetAlloca = Address::invalid();
4536 llvm::Value *UnusedReturnSizePtr = nullptr;
4537 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
4538 if (!ReturnValue.isNull()) {
4539 SRetPtr = ReturnValue.getValue();
4540 } else {
4541 SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
4542 if (HaveInsertPoint() && ReturnValue.isUnused()) {
4543 uint64_t size =
4544 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
4545 UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
4546 }
4547 }
4548 if (IRFunctionArgs.hasSRetArg()) {
4549 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
4550 } else if (RetAI.isInAlloca()) {
4551 Address Addr =
4552 Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
4553 Builder.CreateStore(SRetPtr.getPointer(), Addr);
4554 }
4555 }
4556
4557 Address swiftErrorTemp = Address::invalid();
4558 Address swiftErrorArg = Address::invalid();
4559
4560 // When passing arguments using temporary allocas, we need to add the
4561 // appropriate lifetime markers. This vector keeps track of all the lifetime
4562 // markers that need to be ended right after the call.
4563 SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
4564
4565 // Translate all of the arguments as necessary to match the IR lowering.
  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (I->isAggregate()) {
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(Addr.getPointer());

        if (!ArgInfo.getInAllocaIndirect()) {
          // Replace the placeholder with the appropriate argument slot GEP.
          CGBuilderTy::InsertPoint IP = Builder.saveIP();
          Builder.SetInsertPoint(Placeholder);
          Addr = Builder.CreateStructGEP(ArgMemory,
                                         ArgInfo.getInAllocaFieldIndex());
          Builder.restoreIP(IP);
        } else {
          // For indirect things such as overaligned structs, replace the
          // placeholder with a regular aggregate temporary alloca. Store the
          // address of this alloca into the struct.
          Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
          Address ArgSlot = Builder.CreateStructGEP(
              ArgMemory, ArgInfo.getInAllocaFieldIndex());
          Builder.CreateStore(Addr.getPointer(), ArgSlot);
        }
        deferPlaceholderReplacement(Placeholder, Addr.getPointer());
      } else if (ArgInfo.getInAllocaIndirect()) {
        // Make a temporary alloca and store its address into the argument
        // struct.
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, getContext().getTypeAlignInChars(I->Ty),
            "indirect-arg-temp");
        I->copyInto(*this, Addr);
        Address ArgSlot =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        Builder.CreateStore(Addr.getPointer(), ArgSlot);
      } else {
        // Store the RValue into the argument struct.
        Address Addr =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr.getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable. The
        // definition of a type later in a translation unit may change its type
        // from {}* to (%struct.foo*)*.
        if (Addr.getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        I->copyInto(*this, Addr);
      }
      break;
    }

    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased: {
      assert(NumIRArgs == 1);
      if (!I->isAggregate()) {
        // Make a temporary alloca to pass the argument.
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
        IRCallArgs[FirstIRArg] = Addr.getPointer();

        I->copyInto(*this, Addr);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source. (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is not located in default
        //    or alloca address space.
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Value *V = Addr.getPointer();
        CharUnits Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();

        assert((FirstIRArg >= IRFuncTy->getNumParams() ||
                IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
                    TD->getAllocaAddrSpace()) &&
               "indirect argument must be in alloca address space");

        bool NeedCopy = false;

        if (Addr.getAlignment() < Align &&
            llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
                Align.getAsAlign()) {
          NeedCopy = true;
        } else if (I->hasLValue()) {
          auto LV = I->getKnownLValue();
          auto AS = LV.getAddressSpace();

          if (!ArgInfo.getIndirectByVal() ||
              (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
            NeedCopy = true;
          }
          if (!getLangOpts().OpenCL) {
            if ((ArgInfo.getIndirectByVal() &&
                 (AS != LangAS::Default &&
                  AS != CGM.getASTAllocaAddressSpace()))) {
              NeedCopy = true;
            }
          }
          // For OpenCL even if RV is located in default or alloca address
          // space we don't want to perform address space cast for it.
          else if ((ArgInfo.getIndirectByVal() &&
                    Addr.getType()->getAddressSpace() !=
                        IRFuncTy->getParamType(FirstIRArg)
                            ->getPointerAddressSpace())) {
            NeedCopy = true;
          }
        }

        if (NeedCopy) {
          // Create an aligned temporary, and copy to it.
          Address AI = CreateMemTempWithoutCast(
              I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
          IRCallArgs[FirstIRArg] = AI.getPointer();

          // Emit lifetime markers for the temporary alloca.
          uint64_t ByvalTempElementSize =
              CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
          llvm::Value *LifetimeSize =
              EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());

          // Add cleanup code to emit the end lifetime marker after the call.
          if (LifetimeSize) // In case we disabled lifetime markers.
            CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);

          // Generate the copy.
          I->copyInto(*this, AI);
        } else {
          // Skip the extra memcpy call.
          auto *T = V->getType()->getPointerElementType()->getPointerTo(
              CGM.getDataLayout().getAllocaAddrSpace());
          IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
              *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
              true);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (!I->isAggregate())
          V = I->getKnownRValue().getScalarVal();
        else
          V = Builder.CreateLoad(
              I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress());

        // Implement swifterror by copying into a new swifterror argument.
        // We'll write back in the normal path out of the call.
        if (CallInfo.getExtParameterInfo(ArgNo).getABI()
              == ParameterABI::SwiftErrorResult) {
          assert(!swiftErrorTemp.isValid() && "multiple swifterror args");

          QualType pointeeTy = I->Ty->getPointeeType();
          swiftErrorArg =
              Address(V, getContext().getTypeAlignInChars(pointeeTy));

          swiftErrorTemp =
              CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
          V = swiftErrorTemp.getPointer();
          cast<llvm::AllocaInst>(V)->setSwiftError(true);

          llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
          Builder.CreateStore(errorValue, swiftErrorTemp);
        }

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));

        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      Address Src = Address::invalid();
      if (!I->isAggregate()) {
        Src = CreateMemTemp(I->Ty, "coerce");
        I->copyInto(*this, Src);
      } else {
        Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress();
      }

      // If the value is offset in memory, apply the offset now.
      Src = emitAddressAtOffset(*this, Src, ArgInfo);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy = Src.getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
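        // Illustrative case: a 12-byte struct coerced to a two-element struct
        // such as { i64, i32 } (alloc size 16) is first memcpy'd into a
        // 16-byte temporary so every element can be loaded; the bytes past
        // the original 12 remain undef.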
        if (SrcSize < DstSize) {
          Address TempAlloca
            = CreateTempAlloca(STy, Src.getAlignment(),
                               Src.getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
          Src = TempAlloca;
        } else {
          Src = Builder.CreateBitCast(Src,
                                      STy->getPointerTo(Src.getAddressSpace()));
        }

        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Address EltPtr = Builder.CreateStructGEP(Src, i);
          llvm::Value *LI = Builder.CreateLoad(EltPtr);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        llvm::Value *Load =
            CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);

        if (CallInfo.isCmseNSCall()) {
          // For certain parameter types, clear padding bits, as they may
          // reveal sensitive information.
          // Small struct/union types are passed as integer arrays.
          auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
          if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
            Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
        }
        IRCallArgs[FirstIRArg] = Load;
      }

      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = ArgInfo.getCoerceAndExpandType();
      auto layout = CGM.getDataLayout().getStructLayout(coercionType);

      llvm::Value *tempSize = nullptr;
      Address addr = Address::invalid();
      Address AllocaAddr = Address::invalid();
      if (I->isAggregate()) {
        addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                              : I->getKnownRValue().getAggregateAddress();

      } else {
        RValue RV = I->getKnownRValue();
        assert(RV.isScalar()); // complex should always just be direct

        llvm::Type *scalarType = RV.getScalarVal()->getType();
        auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
        auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);

        // Materialize to a temporary.
        addr = CreateTempAlloca(
            RV.getScalarVal()->getType(),
            CharUnits::fromQuantity(std::max(
                (unsigned)layout->getAlignment().value(), scalarAlign)),
            "tmp",
            /*ArraySize=*/nullptr, &AllocaAddr);
        tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());

        Builder.CreateStore(RV.getScalarVal(), addr);
      }

      addr = Builder.CreateElementBitCast(addr, coercionType);

      unsigned IRArgPos = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = Builder.CreateLoad(eltAddr);
        IRCallArgs[IRArgPos++] = elt;
      }
      assert(IRArgPos == FirstIRArg + NumIRArgs);

      if (tempSize) {
        EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
      }

      break;
    }

    case ABIArgInfo::Expand: {
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
    }
  }

  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
  llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();

  // If we're using inalloca, set up that argument.
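  // Rough shape of the resulting IR on i686-windows (illustrative, using the
  // pre-typed-inalloca syntax this code targets, not exact compiler output):
  //   %argmem = alloca inalloca <{ %struct.A }>
  //   ...arguments constructed in place inside %argmem...
  //   call void @f(<{ %struct.A }>* inalloca %argmem)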
  if (ArgMemory.isValid()) {
    llvm::Value *Arg = ArgMemory.getPointer();
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site. In such
      // cases, we can't do any parameter mismatch checks. Give up and bitcast
      // the callee.
      unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
      CalleePtr =
          Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
                                                 DE = DeclaredTy->element_end(),
                                                 FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  // 2. Prepare the function pointer.

  // If the callee is a bitcast of a non-variadic function to have a
  // variadic function pointer type, check to see if we can remove the
  // bitcast. This comes up with unprototyped functions.
  //
  // This makes the IR nicer, but more importantly it ensures that we
  // can inline the function at -O0 if it is marked always_inline.
  auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
                                   llvm::Value *Ptr) -> llvm::Function * {
    if (!CalleeFT->isVarArg())
      return nullptr;

    // Get the underlying value if it's a bitcast.
    if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
      if (CE->getOpcode() == llvm::Instruction::BitCast)
        Ptr = CE->getOperand(0);
    }

    llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
    if (!OrigFn)
      return nullptr;

    llvm::FunctionType *OrigFT = OrigFn->getFunctionType();

    // If the original type is variadic, or if any of the component types
    // disagree, we cannot remove the cast.
    if (OrigFT->isVarArg() ||
        OrigFT->getNumParams() != CalleeFT->getNumParams() ||
        OrigFT->getReturnType() != CalleeFT->getReturnType())
      return nullptr;

    for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
      if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
        return nullptr;

    return OrigFn;
  };

  if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
    CalleePtr = OrigFn;
    IRFuncTy = OrigFn->getFunctionType();
  }

  // 3. Perform the actual call.

  // Deactivate any cleanups that we're supposed to do immediately before
  // the call.
  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // Assert that the arguments we computed match up. The IR verifier
  // will catch this, but this is a common enough source of problems
  // during IRGen changes that it's way better for debugging to catch
  // it ourselves here.
#ifndef NDEBUG
  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // Inalloca argument can have different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }
#endif

  // Update the largest vector width if any arguments have vector types.
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinSize());
  }

  // Compute the calling convention and attributes.
  unsigned CallingConv;
  llvm::AttributeList Attrs;
  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
                             Callee.getAbstractInfo(), Attrs, CallingConv,
                             /*AttrOnCallSite=*/true);

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
    if (FD->hasAttr<StrictFPAttr>())
      // All calls within a strictfp function are marked strictfp.
      Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                           llvm::Attribute::StrictFP);

  // Add the call-site nomerge attribute if it applies.
  if (InNoMergeAttributedStmt)
    Attrs =
      Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                         llvm::Attribute::NoMerge);

  // Apply some call-site-specific attributes.
  // TODO: work this into building the attribute set.

  // Apply always_inline to all calls within flatten functions.
  // FIXME: should this really take priority over __try, below?
  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
    Attrs =
      Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                         llvm::Attribute::AlwaysInline);
  }

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope()) {
    Attrs =
      Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                         llvm::Attribute::NoInline);
  }

  // Decide whether to use a call or an invoke.
  bool CannotThrow;
  if (currentFunctionUsesSEHTry()) {
    // SEH cares about asynchronous exceptions, so everything can "throw."
    CannotThrow = false;
  } else if (isCleanupPadScope() &&
             EHPersonality::get(*this).isMSVCXXPersonality()) {
    // The MSVC++ personality will implicitly terminate the program if an
    // exception is thrown during a cleanup outside of a try/catch.
    // We don't need to model anything in IR to get this behavior.
    CannotThrow = true;
  } else {
    // Otherwise, nounwind call sites will never throw.
    CannotThrow = Attrs.hasFnAttribute(llvm::Attribute::NoUnwind);

    if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
      if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
        CannotThrow = true;
  }

  // If we made a temporary, be sure to clean up after ourselves. Note that we
  // can't depend on being inside of an ExprWithCleanups, so we need to manually
  // pop this cleanup later on. Being eager about this is OK, since this
  // temporary is 'invisible' outside of the callee.
  if (UnusedReturnSizePtr)
    pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
                                         UnusedReturnSizePtr);

  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();

  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(CalleePtr);

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
    if (FD->hasAttr<StrictFPAttr>())
      // All calls within a strictfp function are marked strictfp.
      Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                           llvm::Attribute::StrictFP);

  AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
  Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
  Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  // Emit the actual call/invoke instruction.
  llvm::CallBase *CI;
  if (!InvokeDest) {
    CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CI;

  // If this is within a function that has the guard(nocf) attribute and is an
  // indirect call, add the "guard_nocf" attribute to this call to indicate that
  // Control Flow Guard checks should not be added, even if the call is inlined.
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (const auto *A = FD->getAttr<CFGuardAttr>()) {
      if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
        Attrs = Attrs.addAttribute(
            getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
    }
  }

  // Apply the attributes and calling convention.
  CI->setAttributes(Attrs);
  CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // Apply various metadata.

  if (!CI->getType()->isVoidTy())
    CI->setName("call");

  // Update the largest vector width from the return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinSize());

  // Insert instrumentation or attach profile metadata at indirect call sites.
  // For more details, see the comment before the definition of
  // IPVK_IndirectCallTarget in InstrProfData.inc.
  if (!CI->getCalledFunction())
    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
                     CI, CalleePtr);

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CI);

  // Suppress tail calls if requested.
  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
    if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
      Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
  }

  // Add metadata for calls to MSAllocator functions.
  if (getDebugInfo() && TargetDecl &&
      TargetDecl->hasAttr<MSAllocatorAttr>())
    getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);

  // 4. Finish the call.

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRGen to discard
  // unreachable code.
  if (CI->doesNotReturn()) {
    if (UnusedReturnSizePtr)
      PopCleanupBlock();

    // Strip away the noreturn attribute to better diagnose unreachable UB.
    if (SanOpts.has(SanitizerKind::Unreachable)) {
      // Also remove from the function since CallBase::hasFnAttr additionally
      // checks attributes of the called function.
      if (auto *F = CI->getCalledFunction())
        F->removeFnAttr(llvm::Attribute::NoReturn);
      CI->removeAttribute(llvm::AttributeList::FunctionIndex,
                          llvm::Attribute::NoReturn);

      // Avoid incompatibility with ASan which relies on the `noreturn`
      // attribute to insert handler calls.
      if (SanOpts.hasOneOf(SanitizerKind::Address |
                           SanitizerKind::KernelAddress)) {
        SanitizerScope SanScope(this);
        llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
        Builder.SetInsertPoint(CI);
        auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
        llvm::FunctionCallee Fn =
            CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
        EmitNounwindRuntimeCall(Fn);
      }
    }

    EmitUnreachable(Loc);
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  // Perform the swifterror writeback.
  if (swiftErrorTemp.isValid()) {
    llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
    Builder.CreateStore(errorResult, swiftErrorArg);
  }

  // Emit any call-associated writebacks immediately. Arguably this
  // should happen after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  // Extract the return value.
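  // For example (a sketch, not exact output): a call returning
  //   struct P { double x, y; };
  // on x86-64 typically yields an IR value of type { double, double }; the
  // Direct path below stores it into the return slot (or a fresh temporary)
  // and rebuilds an aggregate RValue from that address.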
  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = RetAI.getCoerceAndExpandType();

      Address addr = SRetPtr;
      addr = Builder.CreateElementBitCast(addr, coercionType);

      assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
      bool requiresExtract = isa<llvm::StructType>(CI->getType());

      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = CI;
        if (requiresExtract)
          elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
        else
          assert(unpaddedIndex == 0);
        Builder.CreateStore(elt, eltAddr);
      }
      LLVM_FALLTHROUGH;
    }

    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSizePtr)
        PopCleanupBlock();
      return ret;
    }

    case ABIArgInfo::Ignore:
      // If we are ignoring a result that still has a value, make sure to
      // construct the appropriate return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          Address DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr.isValid()) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          EmitAggregateStore(CI, DestPtr, DestIsVolatile);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the value doesn't match, perform a bitcast to coerce it. This
          // can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      Address DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr.isValid()) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
    case ABIArgInfo::IndirectAliased:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  }();

  // Emit the assume_aligned check on the return value.
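  // Illustrative use (hypothetical declaration): for
  //   void *make_buf(void) __attribute__((assume_aligned(64)));
  // the alignment is either attached as a call-site attribute above (when
  // constant) or materialized here as an llvm.assume on the returned pointer.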
  if (Ret.isScalar() && TargetDecl) {
    AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
    AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
  }

  // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
  // we can't use the full cleanup mechanism.
  for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
    LifetimeEnd.Emit(*this, /*Flags=*/{});

  if (!ReturnValue.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
    pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
                RetTy);

  return Ret;
}

CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
  if (isVirtual()) {
    const CallExpr *CE = getVirtualCallExpr();
    return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
        CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
        CE ? CE->getBeginLoc() : SourceLocation());
  }

  return *this;
}

/* VarArg handling */

Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
  VAListAddr = VE->isMicrosoftABI()
                   ? EmitMSVAListRef(VE->getSubExpr())
                   : EmitVAListRef(VE->getSubExpr());
  QualType Ty = VE->getType();
  if (VE->isMicrosoftABI())
    return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}
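// Usage sketch (illustrative): a source-level read such as
//   int f(int n, ...) {
//     va_list ap;
//     va_start(ap, n);
//     int x = va_arg(ap, int);  // this expression reaches EmitVAArg above
//     va_end(ap);
//     return x;
//   }
// ends up here, with the target's ABIInfo computing the address of the next
// argument in the va_list.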