1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // These classes wrap the information about a call or function 10 // definition used to handle ABI compliancy. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "CGCall.h" 15 #include "ABIInfo.h" 16 #include "CGBlocks.h" 17 #include "CGCXXABI.h" 18 #include "CGCleanup.h" 19 #include "CodeGenFunction.h" 20 #include "CodeGenModule.h" 21 #include "TargetInfo.h" 22 #include "clang/AST/Decl.h" 23 #include "clang/AST/DeclCXX.h" 24 #include "clang/AST/DeclObjC.h" 25 #include "clang/Basic/CodeGenOptions.h" 26 #include "clang/Basic/TargetBuiltins.h" 27 #include "clang/Basic/TargetInfo.h" 28 #include "clang/CodeGen/CGFunctionInfo.h" 29 #include "clang/CodeGen/SwiftCallingConv.h" 30 #include "llvm/ADT/StringExtras.h" 31 #include "llvm/Transforms/Utils/Local.h" 32 #include "llvm/Analysis/ValueTracking.h" 33 #include "llvm/IR/Attributes.h" 34 #include "llvm/IR/CallingConv.h" 35 #include "llvm/IR/DataLayout.h" 36 #include "llvm/IR/InlineAsm.h" 37 #include "llvm/IR/IntrinsicInst.h" 38 #include "llvm/IR/Intrinsics.h" 39 using namespace clang; 40 using namespace CodeGen; 41 42 /***/ 43 44 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) { 45 switch (CC) { 46 default: return llvm::CallingConv::C; 47 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall; 48 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall; 49 case CC_X86RegCall: return llvm::CallingConv::X86_RegCall; 50 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall; 51 case CC_Win64: return llvm::CallingConv::Win64; 52 case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV; 53 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS; 54 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 55 case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI; 56 // TODO: Add support for __pascal to LLVM. 57 case CC_X86Pascal: return llvm::CallingConv::C; 58 // TODO: Add support for __vectorcall to LLVM. 59 case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall; 60 case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall; 61 case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC; 62 case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv(); 63 case CC_PreserveMost: return llvm::CallingConv::PreserveMost; 64 case CC_PreserveAll: return llvm::CallingConv::PreserveAll; 65 case CC_Swift: return llvm::CallingConv::Swift; 66 } 67 } 68 69 /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR 70 /// qualification. 71 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD, 72 const CXXMethodDecl *MD) { 73 QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal(); 74 if (MD) 75 RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace()); 76 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy)); 77 } 78 79 /// Returns the canonical formal type of the given C++ method. 
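/// (Illustrative note, not exercised directly here: for a member declared as
///   void S::f(int) const;
/// the formal type computed below is the canonical, unqualified
/// FunctionProtoType for "void (int) const"; the implicit 'this' parameter is
/// prepended later by the arrangement routines that call this helper.)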
80 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) { 81 return MD->getType()->getCanonicalTypeUnqualified() 82 .getAs<FunctionProtoType>(); 83 } 84 85 /// Returns the "extra-canonicalized" return type, which discards 86 /// qualifiers on the return type. Codegen doesn't care about them, 87 /// and it makes ABI code a little easier to be able to assume that 88 /// all parameter and return types are top-level unqualified. 89 static CanQualType GetReturnType(QualType RetTy) { 90 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); 91 } 92 93 /// Arrange the argument and result information for a value of the given 94 /// unprototyped freestanding function type. 95 const CGFunctionInfo & 96 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) { 97 // When translating an unprototyped function type, always use a 98 // variadic type. 99 return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(), 100 /*instanceMethod=*/false, 101 /*chainCall=*/false, None, 102 FTNP->getExtInfo(), {}, RequiredArgs(0)); 103 } 104 105 static void addExtParameterInfosForCall( 106 llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> ¶mInfos, 107 const FunctionProtoType *proto, 108 unsigned prefixArgs, 109 unsigned totalArgs) { 110 assert(proto->hasExtParameterInfos()); 111 assert(paramInfos.size() <= prefixArgs); 112 assert(proto->getNumParams() + prefixArgs <= totalArgs); 113 114 paramInfos.reserve(totalArgs); 115 116 // Add default infos for any prefix args that don't already have infos. 117 paramInfos.resize(prefixArgs); 118 119 // Add infos for the prototype. 120 for (const auto &ParamInfo : proto->getExtParameterInfos()) { 121 paramInfos.push_back(ParamInfo); 122 // pass_object_size params have no parameter info. 123 if (ParamInfo.hasPassObjectSize()) 124 paramInfos.emplace_back(); 125 } 126 127 assert(paramInfos.size() <= totalArgs && 128 "Did we forget to insert pass_object_size args?"); 129 // Add default infos for the variadic and/or suffix arguments. 130 paramInfos.resize(totalArgs); 131 } 132 133 /// Adds the formal parameters in FPT to the given prefix. If any parameter in 134 /// FPT has pass_object_size attrs, then we'll add parameters for those, too. 135 static void appendParameterTypes(const CodeGenTypes &CGT, 136 SmallVectorImpl<CanQualType> &prefix, 137 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> ¶mInfos, 138 CanQual<FunctionProtoType> FPT) { 139 // Fast path: don't touch param info if we don't need to. 140 if (!FPT->hasExtParameterInfos()) { 141 assert(paramInfos.empty() && 142 "We have paramInfos, but the prototype doesn't?"); 143 prefix.append(FPT->param_type_begin(), FPT->param_type_end()); 144 return; 145 } 146 147 unsigned PrefixSize = prefix.size(); 148 // In the vast majority of cases, we'll have precisely FPT->getNumParams() 149 // parameters; the only thing that can change this is the presence of 150 // pass_object_size. So, we preallocate for the common case. 
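  // (Hedged example of the slow path below: for a prototype such as
  //   void f(void *p __attribute__((pass_object_size(0))), int n);
  // the appended types become {p's type, size_t, n's type}, and
  // addExtParameterInfosForCall pads the ExtParameterInfo list to match.)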
151 prefix.reserve(prefix.size() + FPT->getNumParams()); 152 153 auto ExtInfos = FPT->getExtParameterInfos(); 154 assert(ExtInfos.size() == FPT->getNumParams()); 155 for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) { 156 prefix.push_back(FPT->getParamType(I)); 157 if (ExtInfos[I].hasPassObjectSize()) 158 prefix.push_back(CGT.getContext().getSizeType()); 159 } 160 161 addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize, 162 prefix.size()); 163 } 164 165 /// Arrange the LLVM function layout for a value of the given function 166 /// type, on top of any implicit parameters already stored. 167 static const CGFunctionInfo & 168 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, 169 SmallVectorImpl<CanQualType> &prefix, 170 CanQual<FunctionProtoType> FTP) { 171 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; 172 RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size()); 173 // FIXME: Kill copy. 174 appendParameterTypes(CGT, prefix, paramInfos, FTP); 175 CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); 176 177 return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod, 178 /*chainCall=*/false, prefix, 179 FTP->getExtInfo(), paramInfos, 180 Required); 181 } 182 183 /// Arrange the argument and result information for a value of the 184 /// given freestanding function type. 185 const CGFunctionInfo & 186 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) { 187 SmallVector<CanQualType, 16> argTypes; 188 return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes, 189 FTP); 190 } 191 192 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) { 193 // Set the appropriate calling convention for the Function. 194 if (D->hasAttr<StdCallAttr>()) 195 return CC_X86StdCall; 196 197 if (D->hasAttr<FastCallAttr>()) 198 return CC_X86FastCall; 199 200 if (D->hasAttr<RegCallAttr>()) 201 return CC_X86RegCall; 202 203 if (D->hasAttr<ThisCallAttr>()) 204 return CC_X86ThisCall; 205 206 if (D->hasAttr<VectorCallAttr>()) 207 return CC_X86VectorCall; 208 209 if (D->hasAttr<PascalAttr>()) 210 return CC_X86Pascal; 211 212 if (PcsAttr *PCS = D->getAttr<PcsAttr>()) 213 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP); 214 215 if (D->hasAttr<AArch64VectorPcsAttr>()) 216 return CC_AArch64VectorCall; 217 218 if (D->hasAttr<IntelOclBiccAttr>()) 219 return CC_IntelOclBicc; 220 221 if (D->hasAttr<MSABIAttr>()) 222 return IsWindows ? CC_C : CC_Win64; 223 224 if (D->hasAttr<SysVABIAttr>()) 225 return IsWindows ? CC_X86_64SysV : CC_C; 226 227 if (D->hasAttr<PreserveMostAttr>()) 228 return CC_PreserveMost; 229 230 if (D->hasAttr<PreserveAllAttr>()) 231 return CC_PreserveAll; 232 233 return CC_C; 234 } 235 236 /// Arrange the argument and result information for a call to an 237 /// unknown C++ non-static member function of the given abstract type. 238 /// (Zero value of RD means we don't have any meaningful "this" argument type, 239 /// so fall back to a generic pointer type). 240 /// The member function must be an ordinary function, i.e. not a 241 /// constructor or destructor. 242 const CGFunctionInfo & 243 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, 244 const FunctionProtoType *FTP, 245 const CXXMethodDecl *MD) { 246 SmallVector<CanQualType, 16> argTypes; 247 248 // Add the 'this' pointer. 
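  // (Sketch, assuming the common case: for a method of class S, GetThisType
  // yields "S *" in the method's address space; when RD is null the callee's
  // class is unknown, so a plain "void *" stands in for 'this'.)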
249 if (RD) 250 argTypes.push_back(GetThisType(Context, RD, MD)); 251 else 252 argTypes.push_back(Context.VoidPtrTy); 253 254 return ::arrangeLLVMFunctionInfo( 255 *this, true, argTypes, 256 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>()); 257 } 258 259 /// Set calling convention for CUDA/HIP kernel. 260 static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, 261 const FunctionDecl *FD) { 262 if (FD->hasAttr<CUDAGlobalAttr>()) { 263 const FunctionType *FT = FTy->getAs<FunctionType>(); 264 CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT); 265 FTy = FT->getCanonicalTypeUnqualified(); 266 } 267 } 268 269 /// Arrange the argument and result information for a declaration or 270 /// definition of the given C++ non-static member function. The 271 /// member function must be an ordinary function, i.e. not a 272 /// constructor or destructor. 273 const CGFunctionInfo & 274 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) { 275 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!"); 276 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!"); 277 278 CanQualType FT = GetFormalType(MD).getAs<Type>(); 279 setCUDAKernelCallingConvention(FT, CGM, MD); 280 auto prototype = FT.getAs<FunctionProtoType>(); 281 282 if (MD->isInstance()) { 283 // The abstract case is perfectly fine. 284 const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD); 285 return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD); 286 } 287 288 return arrangeFreeFunctionType(prototype); 289 } 290 291 bool CodeGenTypes::inheritingCtorHasParams( 292 const InheritedConstructor &Inherited, CXXCtorType Type) { 293 // Parameters are unnecessary if we're constructing a base class subobject 294 // and the inherited constructor lives in a virtual base. 295 return Type == Ctor_Complete || 296 !Inherited.getShadowDecl()->constructsVirtualBase() || 297 !Target.getCXXABI().hasConstructorVariants(); 298 } 299 300 const CGFunctionInfo & 301 CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, 302 StructorType Type) { 303 304 SmallVector<CanQualType, 16> argTypes; 305 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; 306 argTypes.push_back(GetThisType(Context, MD->getParent(), MD)); 307 308 bool PassParams = true; 309 310 GlobalDecl GD; 311 if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) { 312 GD = GlobalDecl(CD, toCXXCtorType(Type)); 313 314 // A base class inheriting constructor doesn't get forwarded arguments 315 // needed to construct a virtual base (or base class thereof). 316 if (auto Inherited = CD->getInheritedConstructor()) 317 PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type)); 318 } else { 319 auto *DD = dyn_cast<CXXDestructorDecl>(MD); 320 GD = GlobalDecl(DD, toCXXDtorType(Type)); 321 } 322 323 CanQual<FunctionProtoType> FTP = GetFormalType(MD); 324 325 // Add the formal parameters. 326 if (PassParams) 327 appendParameterTypes(*this, argTypes, paramInfos, FTP); 328 329 CGCXXABI::AddedStructorArgs AddedArgs = 330 TheCXXABI.buildStructorSignature(MD, Type, argTypes); 331 if (!paramInfos.empty()) { 332 // Note: prefix implies after the first param. 333 if (AddedArgs.Prefix) 334 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix, 335 FunctionProtoType::ExtParameterInfo{}); 336 if (AddedArgs.Suffix) 337 paramInfos.append(AddedArgs.Suffix, 338 FunctionProtoType::ExtParameterInfo{}); 339 } 340 341 RequiredArgs required = 342 (PassParams && MD->isVariadic() ? 
RequiredArgs(argTypes.size()) 343 : RequiredArgs::All); 344 345 FunctionType::ExtInfo extInfo = FTP->getExtInfo(); 346 CanQualType resultType = TheCXXABI.HasThisReturn(GD) 347 ? argTypes.front() 348 : TheCXXABI.hasMostDerivedReturn(GD) 349 ? CGM.getContext().VoidPtrTy 350 : Context.VoidTy; 351 return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true, 352 /*chainCall=*/false, argTypes, extInfo, 353 paramInfos, required); 354 } 355 356 static SmallVector<CanQualType, 16> 357 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) { 358 SmallVector<CanQualType, 16> argTypes; 359 for (auto &arg : args) 360 argTypes.push_back(ctx.getCanonicalParamType(arg.Ty)); 361 return argTypes; 362 } 363 364 static SmallVector<CanQualType, 16> 365 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) { 366 SmallVector<CanQualType, 16> argTypes; 367 for (auto &arg : args) 368 argTypes.push_back(ctx.getCanonicalParamType(arg->getType())); 369 return argTypes; 370 } 371 372 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> 373 getExtParameterInfosForCall(const FunctionProtoType *proto, 374 unsigned prefixArgs, unsigned totalArgs) { 375 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result; 376 if (proto->hasExtParameterInfos()) { 377 addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs); 378 } 379 return result; 380 } 381 382 /// Arrange a call to a C++ method, passing the given arguments. 383 /// 384 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this` 385 /// parameter. 386 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of 387 /// args. 388 /// PassProtoArgs indicates whether `args` has args for the parameters in the 389 /// given CXXConstructorDecl. 390 const CGFunctionInfo & 391 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args, 392 const CXXConstructorDecl *D, 393 CXXCtorType CtorKind, 394 unsigned ExtraPrefixArgs, 395 unsigned ExtraSuffixArgs, 396 bool PassProtoArgs) { 397 // FIXME: Kill copy. 398 SmallVector<CanQualType, 16> ArgTypes; 399 for (const auto &Arg : args) 400 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); 401 402 // +1 for implicit this, which should always be args[0]. 403 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs; 404 405 CanQual<FunctionProtoType> FPT = GetFormalType(D); 406 RequiredArgs Required = 407 RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs); 408 GlobalDecl GD(D, CtorKind); 409 CanQualType ResultType = TheCXXABI.HasThisReturn(GD) 410 ? ArgTypes.front() 411 : TheCXXABI.hasMostDerivedReturn(GD) 412 ? CGM.getContext().VoidPtrTy 413 : Context.VoidTy; 414 415 FunctionType::ExtInfo Info = FPT->getExtInfo(); 416 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos; 417 // If the prototype args are elided, we should only have ABI-specific args, 418 // which never have param info. 419 if (PassProtoArgs && FPT->hasExtParameterInfos()) { 420 // ABI-specific suffix arguments are treated the same as variadic arguments. 421 addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs, 422 ArgTypes.size()); 423 } 424 return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true, 425 /*chainCall=*/false, ArgTypes, Info, 426 ParamInfos, Required); 427 } 428 429 /// Arrange the argument and result information for the declaration or 430 /// definition of the given function. 
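/// (Illustrative note: an unprototyped declaration such as "void f();" in C is
/// arranged here as non-variadic, while the type-based overload
/// arrangeFreeFunctionType(CanQual<FunctionNoProtoType>) above deliberately
/// arranges the same function type as variadic; instance C++ methods are
/// forwarded to arrangeCXXMethodDeclaration so that 'this' is added.)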
431 const CGFunctionInfo & 432 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { 433 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) 434 if (MD->isInstance()) 435 return arrangeCXXMethodDeclaration(MD); 436 437 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified(); 438 439 assert(isa<FunctionType>(FTy)); 440 setCUDAKernelCallingConvention(FTy, CGM, FD); 441 442 // When declaring a function without a prototype, always use a 443 // non-variadic type. 444 if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) { 445 return arrangeLLVMFunctionInfo( 446 noProto->getReturnType(), /*instanceMethod=*/false, 447 /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All); 448 } 449 450 return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>()); 451 } 452 453 /// Arrange the argument and result information for the declaration or 454 /// definition of an Objective-C method. 455 const CGFunctionInfo & 456 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) { 457 // It happens that this is the same as a call with no optional 458 // arguments, except also using the formal 'self' type. 459 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType()); 460 } 461 462 /// Arrange the argument and result information for the function type 463 /// through which to perform a send to the given Objective-C method, 464 /// using the given receiver type. The receiver type is not always 465 /// the 'self' type of the method or even an Objective-C pointer type. 466 /// This is *not* the right method for actually performing such a 467 /// message send, due to the possibility of optional arguments. 468 const CGFunctionInfo & 469 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, 470 QualType receiverType) { 471 SmallVector<CanQualType, 16> argTys; 472 SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2); 473 argTys.push_back(Context.getCanonicalParamType(receiverType)); 474 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType())); 475 // FIXME: Kill copy? 476 for (const auto *I : MD->parameters()) { 477 argTys.push_back(Context.getCanonicalParamType(I->getType())); 478 auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape( 479 I->hasAttr<NoEscapeAttr>()); 480 extParamInfos.push_back(extParamInfo); 481 } 482 483 FunctionType::ExtInfo einfo; 484 bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows(); 485 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows)); 486 487 if (getContext().getLangOpts().ObjCAutoRefCount && 488 MD->hasAttr<NSReturnsRetainedAttr>()) 489 einfo = einfo.withProducesResult(true); 490 491 RequiredArgs required = 492 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All); 493 494 return arrangeLLVMFunctionInfo( 495 GetReturnType(MD->getReturnType()), /*instanceMethod=*/false, 496 /*chainCall=*/false, argTys, einfo, extParamInfos, required); 497 } 498 499 const CGFunctionInfo & 500 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType, 501 const CallArgList &args) { 502 auto argTypes = getArgTypesForCall(Context, args); 503 FunctionType::ExtInfo einfo; 504 505 return arrangeLLVMFunctionInfo( 506 GetReturnType(returnType), /*instanceMethod=*/false, 507 /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All); 508 } 509 510 const CGFunctionInfo & 511 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { 512 // FIXME: Do we need to handle ObjCMethodDecl? 
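  // (Sketch of the dispatch below: a GlobalDecl that names a constructor or
  // destructor also carries its variant, e.g. {CD, Ctor_Complete}, and is
  // arranged as a structor; any other FunctionDecl takes the plain
  // declaration path.)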
513 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); 514 515 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) 516 return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType())); 517 518 if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) 519 return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType())); 520 521 return arrangeFunctionDeclaration(FD); 522 } 523 524 /// Arrange a thunk that takes 'this' as the first parameter followed by 525 /// varargs. Return a void pointer, regardless of the actual return type. 526 /// The body of the thunk will end in a musttail call to a function of the 527 /// correct type, and the caller will bitcast the function to the correct 528 /// prototype. 529 const CGFunctionInfo & 530 CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) { 531 assert(MD->isVirtual() && "only methods have thunks"); 532 CanQual<FunctionProtoType> FTP = GetFormalType(MD); 533 CanQualType ArgTys[] = { GetThisType(Context, MD->getParent(), MD) }; 534 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false, 535 /*chainCall=*/false, ArgTys, 536 FTP->getExtInfo(), {}, RequiredArgs(1)); 537 } 538 539 const CGFunctionInfo & 540 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD, 541 CXXCtorType CT) { 542 assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure); 543 544 CanQual<FunctionProtoType> FTP = GetFormalType(CD); 545 SmallVector<CanQualType, 2> ArgTys; 546 const CXXRecordDecl *RD = CD->getParent(); 547 ArgTys.push_back(GetThisType(Context, RD, CD)); 548 if (CT == Ctor_CopyingClosure) 549 ArgTys.push_back(*FTP->param_type_begin()); 550 if (RD->getNumVBases() > 0) 551 ArgTys.push_back(Context.IntTy); 552 CallingConv CC = Context.getDefaultCallingConvention( 553 /*IsVariadic=*/false, /*IsCXXMethod=*/true); 554 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true, 555 /*chainCall=*/false, ArgTys, 556 FunctionType::ExtInfo(CC), {}, 557 RequiredArgs::All); 558 } 559 560 /// Arrange a call as unto a free function, except possibly with an 561 /// additional number of formal parameters considered required. 562 static const CGFunctionInfo & 563 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, 564 CodeGenModule &CGM, 565 const CallArgList &args, 566 const FunctionType *fnType, 567 unsigned numExtraRequiredArgs, 568 bool chainCall) { 569 assert(args.size() >= numExtraRequiredArgs); 570 571 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; 572 573 // In most cases, there are no optional arguments. 574 RequiredArgs required = RequiredArgs::All; 575 576 // If we have a variadic prototype, the required arguments are the 577 // extra prefix plus the arguments in the prototype. 578 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) { 579 if (proto->isVariadic()) 580 required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs); 581 582 if (proto->hasExtParameterInfos()) 583 addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs, 584 args.size()); 585 586 // If we don't have a prototype at all, but we're supposed to 587 // explicitly use the variadic convention for unprototyped calls, 588 // treat all of the arguments as required but preserve the nominal 589 // possibility of variadics. 590 } else if (CGM.getTargetCodeGenInfo() 591 .isNoProtoCallVariadic(args, 592 cast<FunctionNoProtoType>(fnType))) { 593 required = RequiredArgs(args.size()); 594 } 595 596 // FIXME: Kill copy. 
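  // (Hedged example of the variadic handling above: for
  //   int printf(const char *, ...);
  // called with three arguments and no extra required args, 'required'
  // becomes RequiredArgs(1), so the two trailing arguments are lowered
  // under the variadic convention by the ABI code.)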
597 SmallVector<CanQualType, 16> argTypes; 598 for (const auto &arg : args) 599 argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty)); 600 return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()), 601 /*instanceMethod=*/false, chainCall, 602 argTypes, fnType->getExtInfo(), paramInfos, 603 required); 604 } 605 606 /// Figure out the rules for calling a function with the given formal 607 /// type using the given arguments. The arguments are necessary 608 /// because the function might be unprototyped, in which case it's 609 /// target-dependent in crazy ways. 610 const CGFunctionInfo & 611 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args, 612 const FunctionType *fnType, 613 bool chainCall) { 614 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 615 chainCall ? 1 : 0, chainCall); 616 } 617 618 /// A block function is essentially a free function with an 619 /// extra implicit argument. 620 const CGFunctionInfo & 621 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args, 622 const FunctionType *fnType) { 623 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1, 624 /*chainCall=*/false); 625 } 626 627 const CGFunctionInfo & 628 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto, 629 const FunctionArgList ¶ms) { 630 auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size()); 631 auto argTypes = getArgTypesForDeclaration(Context, params); 632 633 return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()), 634 /*instanceMethod*/ false, /*chainCall*/ false, 635 argTypes, proto->getExtInfo(), paramInfos, 636 RequiredArgs::forPrototypePlus(proto, 1)); 637 } 638 639 const CGFunctionInfo & 640 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType, 641 const CallArgList &args) { 642 // FIXME: Kill copy. 643 SmallVector<CanQualType, 16> argTypes; 644 for (const auto &Arg : args) 645 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); 646 return arrangeLLVMFunctionInfo( 647 GetReturnType(resultType), /*instanceMethod=*/false, 648 /*chainCall=*/false, argTypes, FunctionType::ExtInfo(), 649 /*paramInfos=*/ {}, RequiredArgs::All); 650 } 651 652 const CGFunctionInfo & 653 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType, 654 const FunctionArgList &args) { 655 auto argTypes = getArgTypesForDeclaration(Context, args); 656 657 return arrangeLLVMFunctionInfo( 658 GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false, 659 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All); 660 } 661 662 const CGFunctionInfo & 663 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType, 664 ArrayRef<CanQualType> argTypes) { 665 return arrangeLLVMFunctionInfo( 666 resultType, /*instanceMethod=*/false, /*chainCall=*/false, 667 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All); 668 } 669 670 /// Arrange a call to a C++ method, passing the given arguments. 671 /// 672 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It 673 /// does not count `this`. 674 const CGFunctionInfo & 675 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args, 676 const FunctionProtoType *proto, 677 RequiredArgs required, 678 unsigned numPrefixArgs) { 679 assert(numPrefixArgs + 1 <= args.size() && 680 "Emitting a call with less args than the required prefix?"); 681 // Add one to account for `this`. It's a bit awkward here, but we don't count 682 // `this` in similar places elsewhere. 
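  // (Sketch: with numPrefixArgs == 1 the call's argument list is laid out as
  //   { this, <ABI prefix arg>, declared params... },
  // so the prototype's ExtParameterInfos are offset by numPrefixArgs + 1.)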
683 auto paramInfos = 684 getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size()); 685 686 // FIXME: Kill copy. 687 auto argTypes = getArgTypesForCall(Context, args); 688 689 FunctionType::ExtInfo info = proto->getExtInfo(); 690 return arrangeLLVMFunctionInfo( 691 GetReturnType(proto->getReturnType()), /*instanceMethod=*/true, 692 /*chainCall=*/false, argTypes, info, paramInfos, required); 693 } 694 695 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() { 696 return arrangeLLVMFunctionInfo( 697 getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false, 698 None, FunctionType::ExtInfo(), {}, RequiredArgs::All); 699 } 700 701 const CGFunctionInfo & 702 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature, 703 const CallArgList &args) { 704 assert(signature.arg_size() <= args.size()); 705 if (signature.arg_size() == args.size()) 706 return signature; 707 708 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; 709 auto sigParamInfos = signature.getExtParameterInfos(); 710 if (!sigParamInfos.empty()) { 711 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end()); 712 paramInfos.resize(args.size()); 713 } 714 715 auto argTypes = getArgTypesForCall(Context, args); 716 717 assert(signature.getRequiredArgs().allowsOptionalArgs()); 718 return arrangeLLVMFunctionInfo(signature.getReturnType(), 719 signature.isInstanceMethod(), 720 signature.isChainCall(), 721 argTypes, 722 signature.getExtInfo(), 723 paramInfos, 724 signature.getRequiredArgs()); 725 } 726 727 namespace clang { 728 namespace CodeGen { 729 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI); 730 } 731 } 732 733 /// Arrange the argument and result information for an abstract value 734 /// of a given function type. This is the method which all of the 735 /// above functions ultimately defer to. 736 const CGFunctionInfo & 737 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType, 738 bool instanceMethod, 739 bool chainCall, 740 ArrayRef<CanQualType> argTypes, 741 FunctionType::ExtInfo info, 742 ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos, 743 RequiredArgs required) { 744 assert(llvm::all_of(argTypes, 745 [](CanQualType T) { return T.isCanonicalAsParam(); })); 746 747 // Lookup or create unique function info. 748 llvm::FoldingSetNodeID ID; 749 CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos, 750 required, resultType, argTypes); 751 752 void *insertPos = nullptr; 753 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos); 754 if (FI) 755 return *FI; 756 757 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC()); 758 759 // Construct the function info. We co-allocate the ArgInfos. 760 FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info, 761 paramInfos, resultType, argTypes, required); 762 FunctionInfos.InsertNode(FI, insertPos); 763 764 bool inserted = FunctionsBeingProcessed.insert(FI).second; 765 (void)inserted; 766 assert(inserted && "Recursively being processed?"); 767 768 // Compute ABI information. 769 if (CC == llvm::CallingConv::SPIR_KERNEL) { 770 // Force target independent argument handling for the host visible 771 // kernel functions. 772 computeSPIRKernelABIInfo(CGM, *FI); 773 } else if (info.getCC() == CC_Swift) { 774 swiftcall::computeABIInfo(CGM, *FI); 775 } else { 776 getABIInfo().computeInfo(*FI); 777 } 778 779 // Loop over all of the computed argument and return value info. If any of 780 // them are direct or extend without a specified coerce type, specify the 781 // default now. 
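  // (Example, not normative: a Direct i32 return left without an explicit
  // coercion type by the ABI simply gets the converted Clang return type
  // filled in below; the same defaulting is applied to each argument.)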
782 ABIArgInfo &retInfo = FI->getReturnInfo(); 783 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) 784 retInfo.setCoerceToType(ConvertType(FI->getReturnType())); 785 786 for (auto &I : FI->arguments()) 787 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr) 788 I.info.setCoerceToType(ConvertType(I.type)); 789 790 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased; 791 assert(erased && "Not in set?"); 792 793 return *FI; 794 } 795 796 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, 797 bool instanceMethod, 798 bool chainCall, 799 const FunctionType::ExtInfo &info, 800 ArrayRef<ExtParameterInfo> paramInfos, 801 CanQualType resultType, 802 ArrayRef<CanQualType> argTypes, 803 RequiredArgs required) { 804 assert(paramInfos.empty() || paramInfos.size() == argTypes.size()); 805 806 void *buffer = 807 operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>( 808 argTypes.size() + 1, paramInfos.size())); 809 810 CGFunctionInfo *FI = new(buffer) CGFunctionInfo(); 811 FI->CallingConvention = llvmCC; 812 FI->EffectiveCallingConvention = llvmCC; 813 FI->ASTCallingConvention = info.getCC(); 814 FI->InstanceMethod = instanceMethod; 815 FI->ChainCall = chainCall; 816 FI->NoReturn = info.getNoReturn(); 817 FI->ReturnsRetained = info.getProducesResult(); 818 FI->NoCallerSavedRegs = info.getNoCallerSavedRegs(); 819 FI->NoCfCheck = info.getNoCfCheck(); 820 FI->Required = required; 821 FI->HasRegParm = info.getHasRegParm(); 822 FI->RegParm = info.getRegParm(); 823 FI->ArgStruct = nullptr; 824 FI->ArgStructAlign = 0; 825 FI->NumArgs = argTypes.size(); 826 FI->HasExtParameterInfos = !paramInfos.empty(); 827 FI->getArgsBuffer()[0].type = resultType; 828 for (unsigned i = 0, e = argTypes.size(); i != e; ++i) 829 FI->getArgsBuffer()[i + 1].type = argTypes[i]; 830 for (unsigned i = 0, e = paramInfos.size(); i != e; ++i) 831 FI->getExtParameterInfosBuffer()[i] = paramInfos[i]; 832 return FI; 833 } 834 835 /***/ 836 837 namespace { 838 // ABIArgInfo::Expand implementation. 839 840 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded. 841 struct TypeExpansion { 842 enum TypeExpansionKind { 843 // Elements of constant arrays are expanded recursively. 844 TEK_ConstantArray, 845 // Record fields are expanded recursively (but if record is a union, only 846 // the field with the largest size is expanded). 847 TEK_Record, 848 // For complex types, real and imaginary parts are expanded recursively. 849 TEK_Complex, 850 // All other types are not expandable. 
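    // (Illustrative mapping, assuming typical types: "int[4]" is
    // TEK_ConstantArray, "struct {int x; float y;}" is TEK_Record,
    // "_Complex double" is TEK_Complex, and a plain scalar falls through to
    // TEK_None below and is passed as a single value.)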
851 TEK_None 852 }; 853 854 const TypeExpansionKind Kind; 855 856 TypeExpansion(TypeExpansionKind K) : Kind(K) {} 857 virtual ~TypeExpansion() {} 858 }; 859 860 struct ConstantArrayExpansion : TypeExpansion { 861 QualType EltTy; 862 uint64_t NumElts; 863 864 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts) 865 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {} 866 static bool classof(const TypeExpansion *TE) { 867 return TE->Kind == TEK_ConstantArray; 868 } 869 }; 870 871 struct RecordExpansion : TypeExpansion { 872 SmallVector<const CXXBaseSpecifier *, 1> Bases; 873 874 SmallVector<const FieldDecl *, 1> Fields; 875 876 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases, 877 SmallVector<const FieldDecl *, 1> &&Fields) 878 : TypeExpansion(TEK_Record), Bases(std::move(Bases)), 879 Fields(std::move(Fields)) {} 880 static bool classof(const TypeExpansion *TE) { 881 return TE->Kind == TEK_Record; 882 } 883 }; 884 885 struct ComplexExpansion : TypeExpansion { 886 QualType EltTy; 887 888 ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {} 889 static bool classof(const TypeExpansion *TE) { 890 return TE->Kind == TEK_Complex; 891 } 892 }; 893 894 struct NoExpansion : TypeExpansion { 895 NoExpansion() : TypeExpansion(TEK_None) {} 896 static bool classof(const TypeExpansion *TE) { 897 return TE->Kind == TEK_None; 898 } 899 }; 900 } // namespace 901 902 static std::unique_ptr<TypeExpansion> 903 getTypeExpansion(QualType Ty, const ASTContext &Context) { 904 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 905 return llvm::make_unique<ConstantArrayExpansion>( 906 AT->getElementType(), AT->getSize().getZExtValue()); 907 } 908 if (const RecordType *RT = Ty->getAs<RecordType>()) { 909 SmallVector<const CXXBaseSpecifier *, 1> Bases; 910 SmallVector<const FieldDecl *, 1> Fields; 911 const RecordDecl *RD = RT->getDecl(); 912 assert(!RD->hasFlexibleArrayMember() && 913 "Cannot expand structure with flexible array."); 914 if (RD->isUnion()) { 915 // Unions can be here only in degenerative cases - all the fields are same 916 // after flattening. Thus we have to use the "largest" field. 
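      // (Hedged example: for "union { int i; double d; }" the double member
      // is the largest, so only "d" survives into the expansion below.)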
917 const FieldDecl *LargestFD = nullptr; 918 CharUnits UnionSize = CharUnits::Zero(); 919 920 for (const auto *FD : RD->fields()) { 921 if (FD->isZeroLengthBitField(Context)) 922 continue; 923 assert(!FD->isBitField() && 924 "Cannot expand structure with bit-field members."); 925 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType()); 926 if (UnionSize < FieldSize) { 927 UnionSize = FieldSize; 928 LargestFD = FD; 929 } 930 } 931 if (LargestFD) 932 Fields.push_back(LargestFD); 933 } else { 934 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 935 assert(!CXXRD->isDynamicClass() && 936 "cannot expand vtable pointers in dynamic classes"); 937 for (const CXXBaseSpecifier &BS : CXXRD->bases()) 938 Bases.push_back(&BS); 939 } 940 941 for (const auto *FD : RD->fields()) { 942 if (FD->isZeroLengthBitField(Context)) 943 continue; 944 assert(!FD->isBitField() && 945 "Cannot expand structure with bit-field members."); 946 Fields.push_back(FD); 947 } 948 } 949 return llvm::make_unique<RecordExpansion>(std::move(Bases), 950 std::move(Fields)); 951 } 952 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 953 return llvm::make_unique<ComplexExpansion>(CT->getElementType()); 954 } 955 return llvm::make_unique<NoExpansion>(); 956 } 957 958 static int getExpansionSize(QualType Ty, const ASTContext &Context) { 959 auto Exp = getTypeExpansion(Ty, Context); 960 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { 961 return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context); 962 } 963 if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { 964 int Res = 0; 965 for (auto BS : RExp->Bases) 966 Res += getExpansionSize(BS->getType(), Context); 967 for (auto FD : RExp->Fields) 968 Res += getExpansionSize(FD->getType(), Context); 969 return Res; 970 } 971 if (isa<ComplexExpansion>(Exp.get())) 972 return 2; 973 assert(isa<NoExpansion>(Exp.get())); 974 return 1; 975 } 976 977 void 978 CodeGenTypes::getExpandedTypes(QualType Ty, 979 SmallVectorImpl<llvm::Type *>::iterator &TI) { 980 auto Exp = getTypeExpansion(Ty, Context); 981 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { 982 for (int i = 0, n = CAExp->NumElts; i < n; i++) { 983 getExpandedTypes(CAExp->EltTy, TI); 984 } 985 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { 986 for (auto BS : RExp->Bases) 987 getExpandedTypes(BS->getType(), TI); 988 for (auto FD : RExp->Fields) 989 getExpandedTypes(FD->getType(), TI); 990 } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) { 991 llvm::Type *EltTy = ConvertType(CExp->EltTy); 992 *TI++ = EltTy; 993 *TI++ = EltTy; 994 } else { 995 assert(isa<NoExpansion>(Exp.get())); 996 *TI++ = ConvertType(Ty); 997 } 998 } 999 1000 static void forConstantArrayExpansion(CodeGenFunction &CGF, 1001 ConstantArrayExpansion *CAE, 1002 Address BaseAddr, 1003 llvm::function_ref<void(Address)> Fn) { 1004 CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy); 1005 CharUnits EltAlign = 1006 BaseAddr.getAlignment().alignmentOfArrayElement(EltSize); 1007 1008 for (int i = 0, n = CAE->NumElts; i < n; i++) { 1009 llvm::Value *EltAddr = 1010 CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i); 1011 Fn(Address(EltAddr, EltAlign)); 1012 } 1013 } 1014 1015 void CodeGenFunction::ExpandTypeFromArgs( 1016 QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) { 1017 assert(LV.isSimple() && 1018 "Unexpected non-simple lvalue during struct expansion."); 1019 1020 auto Exp = getTypeExpansion(Ty, getContext()); 1021 if (auto CAExp = 
dyn_cast<ConstantArrayExpansion>(Exp.get())) { 1022 forConstantArrayExpansion(*this, CAExp, LV.getAddress(), 1023 [&](Address EltAddr) { 1024 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy); 1025 ExpandTypeFromArgs(CAExp->EltTy, LV, AI); 1026 }); 1027 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { 1028 Address This = LV.getAddress(); 1029 for (const CXXBaseSpecifier *BS : RExp->Bases) { 1030 // Perform a single step derived-to-base conversion. 1031 Address Base = 1032 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, 1033 /*NullCheckValue=*/false, SourceLocation()); 1034 LValue SubLV = MakeAddrLValue(Base, BS->getType()); 1035 1036 // Recurse onto bases. 1037 ExpandTypeFromArgs(BS->getType(), SubLV, AI); 1038 } 1039 for (auto FD : RExp->Fields) { 1040 // FIXME: What are the right qualifiers here? 1041 LValue SubLV = EmitLValueForFieldInitialization(LV, FD); 1042 ExpandTypeFromArgs(FD->getType(), SubLV, AI); 1043 } 1044 } else if (isa<ComplexExpansion>(Exp.get())) { 1045 auto realValue = *AI++; 1046 auto imagValue = *AI++; 1047 EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true); 1048 } else { 1049 assert(isa<NoExpansion>(Exp.get())); 1050 EmitStoreThroughLValue(RValue::get(*AI++), LV); 1051 } 1052 } 1053 1054 void CodeGenFunction::ExpandTypeToArgs( 1055 QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy, 1056 SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) { 1057 auto Exp = getTypeExpansion(Ty, getContext()); 1058 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { 1059 Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress() 1060 : Arg.getKnownRValue().getAggregateAddress(); 1061 forConstantArrayExpansion( 1062 *this, CAExp, Addr, [&](Address EltAddr) { 1063 CallArg EltArg = CallArg( 1064 convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()), 1065 CAExp->EltTy); 1066 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs, 1067 IRCallArgPos); 1068 }); 1069 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { 1070 Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress() 1071 : Arg.getKnownRValue().getAggregateAddress(); 1072 for (const CXXBaseSpecifier *BS : RExp->Bases) { 1073 // Perform a single step derived-to-base conversion. 1074 Address Base = 1075 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, 1076 /*NullCheckValue=*/false, SourceLocation()); 1077 CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType()); 1078 1079 // Recurse onto bases. 1080 ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs, 1081 IRCallArgPos); 1082 } 1083 1084 LValue LV = MakeAddrLValue(This, Ty); 1085 for (auto FD : RExp->Fields) { 1086 CallArg FldArg = 1087 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType()); 1088 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs, 1089 IRCallArgPos); 1090 } 1091 } else if (isa<ComplexExpansion>(Exp.get())) { 1092 ComplexPairTy CV = Arg.getKnownRValue().getComplexVal(); 1093 IRCallArgs[IRCallArgPos++] = CV.first; 1094 IRCallArgs[IRCallArgPos++] = CV.second; 1095 } else { 1096 assert(isa<NoExpansion>(Exp.get())); 1097 auto RV = Arg.getKnownRValue(); 1098 assert(RV.isScalar() && 1099 "Unexpected non-scalar rvalue during struct expansion."); 1100 1101 // Insert a bitcast as needed. 
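    // (Sketch: if, say, the IR parameter expects i8* but the expanded scalar
    // is an i32*, it is bitcast to the parameter type before being appended;
    // the concrete types here depend entirely on the target lowering.)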
1102 llvm::Value *V = RV.getScalarVal(); 1103 if (IRCallArgPos < IRFuncTy->getNumParams() && 1104 V->getType() != IRFuncTy->getParamType(IRCallArgPos)) 1105 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos)); 1106 1107 IRCallArgs[IRCallArgPos++] = V; 1108 } 1109 } 1110 1111 /// Create a temporary allocation for the purposes of coercion. 1112 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, 1113 CharUnits MinAlign) { 1114 // Don't use an alignment that's worse than what LLVM would prefer. 1115 auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty); 1116 CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign)); 1117 1118 return CGF.CreateTempAlloca(Ty, Align); 1119 } 1120 1121 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are 1122 /// accessing some number of bytes out of it, try to gep into the struct to get 1123 /// at its inner goodness. Dive as deep as possible without entering an element 1124 /// with an in-memory size smaller than DstSize. 1125 static Address 1126 EnterStructPointerForCoercedAccess(Address SrcPtr, 1127 llvm::StructType *SrcSTy, 1128 uint64_t DstSize, CodeGenFunction &CGF) { 1129 // We can't dive into a zero-element struct. 1130 if (SrcSTy->getNumElements() == 0) return SrcPtr; 1131 1132 llvm::Type *FirstElt = SrcSTy->getElementType(0); 1133 1134 // If the first elt is at least as large as what we're looking for, or if the 1135 // first element is the same size as the whole struct, we can enter it. The 1136 // comparison must be made on the store size and not the alloca size. Using 1137 // the alloca size may overstate the size of the load. 1138 uint64_t FirstEltSize = 1139 CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt); 1140 if (FirstEltSize < DstSize && 1141 FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy)) 1142 return SrcPtr; 1143 1144 // GEP into the first element. 1145 SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive"); 1146 1147 // If the first element is a struct, recurse. 1148 llvm::Type *SrcTy = SrcPtr.getElementType(); 1149 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) 1150 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF); 1151 1152 return SrcPtr; 1153 } 1154 1155 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both 1156 /// are either integers or pointers. This does a truncation of the value if it 1157 /// is too large or a zero extension if it is too small. 1158 /// 1159 /// This behaves as if the value were coerced through memory, so on big-endian 1160 /// targets the high bits are preserved in a truncation, while little-endian 1161 /// targets preserve the low bits. 1162 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, 1163 llvm::Type *Ty, 1164 CodeGenFunction &CGF) { 1165 if (Val->getType() == Ty) 1166 return Val; 1167 1168 if (isa<llvm::PointerType>(Val->getType())) { 1169 // If this is Pointer->Pointer avoid conversion to and from int. 1170 if (isa<llvm::PointerType>(Ty)) 1171 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val"); 1172 1173 // Convert the pointer to an integer so we can play with its width. 
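    // (Worked example, assuming a big-endian target: coercing an i64 value to
    // i32 below first shifts right by 32 so that the high bits, the ones a
    // store-then-load through memory would keep, survive the truncation;
    // little-endian targets just use an ordinary integer cast.)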
1174 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi"); 1175 } 1176 1177 llvm::Type *DestIntTy = Ty; 1178 if (isa<llvm::PointerType>(DestIntTy)) 1179 DestIntTy = CGF.IntPtrTy; 1180 1181 if (Val->getType() != DestIntTy) { 1182 const llvm::DataLayout &DL = CGF.CGM.getDataLayout(); 1183 if (DL.isBigEndian()) { 1184 // Preserve the high bits on big-endian targets. 1185 // That is what memory coercion does. 1186 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType()); 1187 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy); 1188 1189 if (SrcSize > DstSize) { 1190 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits"); 1191 Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii"); 1192 } else { 1193 Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii"); 1194 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits"); 1195 } 1196 } else { 1197 // Little-endian targets preserve the low bits. No shifts required. 1198 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii"); 1199 } 1200 } 1201 1202 if (isa<llvm::PointerType>(Ty)) 1203 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip"); 1204 return Val; 1205 } 1206 1207 1208 1209 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as 1210 /// a pointer to an object of type \arg Ty, known to be aligned to 1211 /// \arg SrcAlign bytes. 1212 /// 1213 /// This safely handles the case when the src type is smaller than the 1214 /// destination type; in this situation the values of bits which not 1215 /// present in the src are undefined. 1216 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty, 1217 CodeGenFunction &CGF) { 1218 llvm::Type *SrcTy = Src.getElementType(); 1219 1220 // If SrcTy and Ty are the same, just do a load. 1221 if (SrcTy == Ty) 1222 return CGF.Builder.CreateLoad(Src); 1223 1224 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty); 1225 1226 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) { 1227 Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF); 1228 SrcTy = Src.getType()->getElementType(); 1229 } 1230 1231 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); 1232 1233 // If the source and destination are integer or pointer types, just do an 1234 // extension or truncation to the desired type. 1235 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) && 1236 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) { 1237 llvm::Value *Load = CGF.Builder.CreateLoad(Src); 1238 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF); 1239 } 1240 1241 // If load is legal, just bitcast the src pointer. 1242 if (SrcSize >= DstSize) { 1243 // Generally SrcSize is never greater than DstSize, since this means we are 1244 // losing bits. However, this can happen in cases where the structure has 1245 // additional padding, for example due to a user specified alignment. 1246 // 1247 // FIXME: Assert that we aren't truncating non-padding bits when have access 1248 // to that information. 1249 Src = CGF.Builder.CreateBitCast(Src, 1250 Ty->getPointerTo(Src.getAddressSpace())); 1251 return CGF.Builder.CreateLoad(Src); 1252 } 1253 1254 // Otherwise do coercion through memory. This is stupid, but simple. 
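  // (Hedged illustration: loading, e.g., a 12-byte source as an i128 coercion
  // type would read past the object, so the source is instead memcpy'd into a
  // temporary alloca of the destination type and loaded from there; only
  // SrcSize bytes are copied, leaving the remaining destination bits
  // unspecified, as the function comment above allows.)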
1255 Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment()); 1256 Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty); 1257 Address SrcCasted = CGF.Builder.CreateElementBitCast(Src,CGF.Int8Ty); 1258 CGF.Builder.CreateMemCpy(Casted, SrcCasted, 1259 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize), 1260 false); 1261 return CGF.Builder.CreateLoad(Tmp); 1262 } 1263 1264 // Function to store a first-class aggregate into memory. We prefer to 1265 // store the elements rather than the aggregate to be more friendly to 1266 // fast-isel. 1267 // FIXME: Do we need to recurse here? 1268 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val, 1269 Address Dest, bool DestIsVolatile) { 1270 // Prefer scalar stores to first-class aggregate stores. 1271 if (llvm::StructType *STy = 1272 dyn_cast<llvm::StructType>(Val->getType())) { 1273 const llvm::StructLayout *Layout = 1274 CGF.CGM.getDataLayout().getStructLayout(STy); 1275 1276 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1277 auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i)); 1278 Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset); 1279 llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i); 1280 CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile); 1281 } 1282 } else { 1283 CGF.Builder.CreateStore(Val, Dest, DestIsVolatile); 1284 } 1285 } 1286 1287 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src, 1288 /// where the source and destination may have different types. The 1289 /// destination is known to be aligned to \arg DstAlign bytes. 1290 /// 1291 /// This safely handles the case when the src type is larger than the 1292 /// destination type; the upper bits of the src will be lost. 1293 static void CreateCoercedStore(llvm::Value *Src, 1294 Address Dst, 1295 bool DstIsVolatile, 1296 CodeGenFunction &CGF) { 1297 llvm::Type *SrcTy = Src->getType(); 1298 llvm::Type *DstTy = Dst.getType()->getElementType(); 1299 if (SrcTy == DstTy) { 1300 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); 1301 return; 1302 } 1303 1304 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); 1305 1306 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) { 1307 Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF); 1308 DstTy = Dst.getType()->getElementType(); 1309 } 1310 1311 // If the source and destination are integer or pointer types, just do an 1312 // extension or truncation to the desired type. 1313 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) && 1314 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) { 1315 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF); 1316 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); 1317 return; 1318 } 1319 1320 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy); 1321 1322 // If store is legal, just bitcast the src pointer. 1323 if (SrcSize <= DstSize) { 1324 Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy); 1325 BuildAggStore(CGF, Src, Dst, DstIsVolatile); 1326 } else { 1327 // Otherwise do coercion through memory. This is stupid, but 1328 // simple. 1329 1330 // Generally SrcSize is never greater than DstSize, since this means we are 1331 // losing bits. However, this can happen in cases where the structure has 1332 // additional padding, for example due to a user specified alignment. 1333 // 1334 // FIXME: Assert that we aren't truncating non-padding bits when have access 1335 // to that information. 
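    // (Sketch of the store-side fallback, mirroring CreateCoercedLoad: the
    // source value is spilled to a temporary of its own type and only DstSize
    // bytes are memcpy'd into the destination, so excess source bits are
    // dropped as documented above.)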
1336 Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment()); 1337 CGF.Builder.CreateStore(Src, Tmp); 1338 Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty); 1339 Address DstCasted = CGF.Builder.CreateElementBitCast(Dst,CGF.Int8Ty); 1340 CGF.Builder.CreateMemCpy(DstCasted, Casted, 1341 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize), 1342 false); 1343 } 1344 } 1345 1346 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, 1347 const ABIArgInfo &info) { 1348 if (unsigned offset = info.getDirectOffset()) { 1349 addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty); 1350 addr = CGF.Builder.CreateConstInBoundsByteGEP(addr, 1351 CharUnits::fromQuantity(offset)); 1352 addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType()); 1353 } 1354 return addr; 1355 } 1356 1357 namespace { 1358 1359 /// Encapsulates information about the way function arguments from 1360 /// CGFunctionInfo should be passed to actual LLVM IR function. 1361 class ClangToLLVMArgMapping { 1362 static const unsigned InvalidIndex = ~0U; 1363 unsigned InallocaArgNo; 1364 unsigned SRetArgNo; 1365 unsigned TotalIRArgs; 1366 1367 /// Arguments of LLVM IR function corresponding to single Clang argument. 1368 struct IRArgs { 1369 unsigned PaddingArgIndex; 1370 // Argument is expanded to IR arguments at positions 1371 // [FirstArgIndex, FirstArgIndex + NumberOfArgs). 1372 unsigned FirstArgIndex; 1373 unsigned NumberOfArgs; 1374 1375 IRArgs() 1376 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex), 1377 NumberOfArgs(0) {} 1378 }; 1379 1380 SmallVector<IRArgs, 8> ArgInfo; 1381 1382 public: 1383 ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI, 1384 bool OnlyRequiredArgs = false) 1385 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0), 1386 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) { 1387 construct(Context, FI, OnlyRequiredArgs); 1388 } 1389 1390 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; } 1391 unsigned getInallocaArgNo() const { 1392 assert(hasInallocaArg()); 1393 return InallocaArgNo; 1394 } 1395 1396 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; } 1397 unsigned getSRetArgNo() const { 1398 assert(hasSRetArg()); 1399 return SRetArgNo; 1400 } 1401 1402 unsigned totalIRArgs() const { return TotalIRArgs; } 1403 1404 bool hasPaddingArg(unsigned ArgNo) const { 1405 assert(ArgNo < ArgInfo.size()); 1406 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; 1407 } 1408 unsigned getPaddingArgNo(unsigned ArgNo) const { 1409 assert(hasPaddingArg(ArgNo)); 1410 return ArgInfo[ArgNo].PaddingArgIndex; 1411 } 1412 1413 /// Returns index of first IR argument corresponding to ArgNo, and their 1414 /// quantity. 1415 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const { 1416 assert(ArgNo < ArgInfo.size()); 1417 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, 1418 ArgInfo[ArgNo].NumberOfArgs); 1419 } 1420 1421 private: 1422 void construct(const ASTContext &Context, const CGFunctionInfo &FI, 1423 bool OnlyRequiredArgs); 1424 }; 1425 1426 void ClangToLLVMArgMapping::construct(const ASTContext &Context, 1427 const CGFunctionInfo &FI, 1428 bool OnlyRequiredArgs) { 1429 unsigned IRArgNo = 0; 1430 bool SwapThisWithSRet = false; 1431 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1432 1433 if (RetAI.getKind() == ABIArgInfo::Indirect) { 1434 SwapThisWithSRet = RetAI.isSRetAfterThis(); 1435 SRetArgNo = SwapThisWithSRet ? 
1 : IRArgNo++; 1436 } 1437 1438 unsigned ArgNo = 0; 1439 unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); 1440 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; 1441 ++I, ++ArgNo) { 1442 assert(I != FI.arg_end()); 1443 QualType ArgType = I->type; 1444 const ABIArgInfo &AI = I->info; 1445 // Collect data about IR arguments corresponding to Clang argument ArgNo. 1446 auto &IRArgs = ArgInfo[ArgNo]; 1447 1448 if (AI.getPaddingType()) 1449 IRArgs.PaddingArgIndex = IRArgNo++; 1450 1451 switch (AI.getKind()) { 1452 case ABIArgInfo::Extend: 1453 case ABIArgInfo::Direct: { 1454 // FIXME: handle sseregparm someday... 1455 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType()); 1456 if (AI.isDirect() && AI.getCanBeFlattened() && STy) { 1457 IRArgs.NumberOfArgs = STy->getNumElements(); 1458 } else { 1459 IRArgs.NumberOfArgs = 1; 1460 } 1461 break; 1462 } 1463 case ABIArgInfo::Indirect: 1464 IRArgs.NumberOfArgs = 1; 1465 break; 1466 case ABIArgInfo::Ignore: 1467 case ABIArgInfo::InAlloca: 1468 // ignore and inalloca doesn't have matching LLVM parameters. 1469 IRArgs.NumberOfArgs = 0; 1470 break; 1471 case ABIArgInfo::CoerceAndExpand: 1472 IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size(); 1473 break; 1474 case ABIArgInfo::Expand: 1475 IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context); 1476 break; 1477 } 1478 1479 if (IRArgs.NumberOfArgs > 0) { 1480 IRArgs.FirstArgIndex = IRArgNo; 1481 IRArgNo += IRArgs.NumberOfArgs; 1482 } 1483 1484 // Skip over the sret parameter when it comes second. We already handled it 1485 // above. 1486 if (IRArgNo == 1 && SwapThisWithSRet) 1487 IRArgNo++; 1488 } 1489 assert(ArgNo == ArgInfo.size()); 1490 1491 if (FI.usesInAlloca()) 1492 InallocaArgNo = IRArgNo++; 1493 1494 TotalIRArgs = IRArgNo; 1495 } 1496 } // namespace 1497 1498 /***/ 1499 1500 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { 1501 const auto &RI = FI.getReturnInfo(); 1502 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet()); 1503 } 1504 1505 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) { 1506 return ReturnTypeUsesSRet(FI) && 1507 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs(); 1508 } 1509 1510 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { 1511 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { 1512 switch (BT->getKind()) { 1513 default: 1514 return false; 1515 case BuiltinType::Float: 1516 return getTarget().useObjCFPRetForRealType(TargetInfo::Float); 1517 case BuiltinType::Double: 1518 return getTarget().useObjCFPRetForRealType(TargetInfo::Double); 1519 case BuiltinType::LongDouble: 1520 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble); 1521 } 1522 } 1523 1524 return false; 1525 } 1526 1527 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { 1528 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { 1529 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { 1530 if (BT->getKind() == BuiltinType::LongDouble) 1531 return getTarget().useObjCFP2RetForComplexLongDouble(); 1532 } 1533 } 1534 1535 return false; 1536 } 1537 1538 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { 1539 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); 1540 return GetFunctionType(FI); 1541 } 1542 1543 llvm::FunctionType * 1544 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { 1545 1546 bool Inserted = FunctionsBeingProcessed.insert(&FI).second; 1547 
(void)Inserted; 1548 assert(Inserted && "Recursively being processed?"); 1549 1550 llvm::Type *resultType = nullptr; 1551 const ABIArgInfo &retAI = FI.getReturnInfo(); 1552 switch (retAI.getKind()) { 1553 case ABIArgInfo::Expand: 1554 llvm_unreachable("Invalid ABI kind for return argument"); 1555 1556 case ABIArgInfo::Extend: 1557 case ABIArgInfo::Direct: 1558 resultType = retAI.getCoerceToType(); 1559 break; 1560 1561 case ABIArgInfo::InAlloca: 1562 if (retAI.getInAllocaSRet()) { 1563 // sret things on win32 aren't void, they return the sret pointer. 1564 QualType ret = FI.getReturnType(); 1565 llvm::Type *ty = ConvertType(ret); 1566 unsigned addressSpace = Context.getTargetAddressSpace(ret); 1567 resultType = llvm::PointerType::get(ty, addressSpace); 1568 } else { 1569 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1570 } 1571 break; 1572 1573 case ABIArgInfo::Indirect: 1574 case ABIArgInfo::Ignore: 1575 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1576 break; 1577 1578 case ABIArgInfo::CoerceAndExpand: 1579 resultType = retAI.getUnpaddedCoerceAndExpandType(); 1580 break; 1581 } 1582 1583 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); 1584 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); 1585 1586 // Add type for sret argument. 1587 if (IRFunctionArgs.hasSRetArg()) { 1588 QualType Ret = FI.getReturnType(); 1589 llvm::Type *Ty = ConvertType(Ret); 1590 unsigned AddressSpace = Context.getTargetAddressSpace(Ret); 1591 ArgTypes[IRFunctionArgs.getSRetArgNo()] = 1592 llvm::PointerType::get(Ty, AddressSpace); 1593 } 1594 1595 // Add type for inalloca argument. 1596 if (IRFunctionArgs.hasInallocaArg()) { 1597 auto ArgStruct = FI.getArgStruct(); 1598 assert(ArgStruct); 1599 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); 1600 } 1601 1602 // Add in all of the required arguments. 1603 unsigned ArgNo = 0; 1604 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 1605 ie = it + FI.getNumRequiredArgs(); 1606 for (; it != ie; ++it, ++ArgNo) { 1607 const ABIArgInfo &ArgInfo = it->info; 1608 1609 // Insert a padding type to ensure proper alignment. 1610 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 1611 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 1612 ArgInfo.getPaddingType(); 1613 1614 unsigned FirstIRArg, NumIRArgs; 1615 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1616 1617 switch (ArgInfo.getKind()) { 1618 case ABIArgInfo::Ignore: 1619 case ABIArgInfo::InAlloca: 1620 assert(NumIRArgs == 0); 1621 break; 1622 1623 case ABIArgInfo::Indirect: { 1624 assert(NumIRArgs == 1); 1625 // indirect arguments are always on the stack, which is alloca addr space. 1626 llvm::Type *LTy = ConvertTypeForMem(it->type); 1627 ArgTypes[FirstIRArg] = LTy->getPointerTo( 1628 CGM.getDataLayout().getAllocaAddrSpace()); 1629 break; 1630 } 1631 1632 case ABIArgInfo::Extend: 1633 case ABIArgInfo::Direct: { 1634 // Fast-isel and the optimizer generally like scalar values better than 1635 // FCAs, so we flatten them if this is safe to do for this argument. 
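// Illustrative sketch (not from the original source): if a parameter's
// coercion type is the LLVM struct { i32, i32 } and it can be flattened,
// its elements become separate IR parameters, e.g.
//   void @f({ i32, i32 } %p)   becomes   void @f(i32 %p.0, i32 %p.1)
// rather than passing a first-class aggregate.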
1636 llvm::Type *argType = ArgInfo.getCoerceToType(); 1637 llvm::StructType *st = dyn_cast<llvm::StructType>(argType); 1638 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 1639 assert(NumIRArgs == st->getNumElements()); 1640 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) 1641 ArgTypes[FirstIRArg + i] = st->getElementType(i); 1642 } else { 1643 assert(NumIRArgs == 1); 1644 ArgTypes[FirstIRArg] = argType; 1645 } 1646 break; 1647 } 1648 1649 case ABIArgInfo::CoerceAndExpand: { 1650 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1651 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { 1652 *ArgTypesIter++ = EltTy; 1653 } 1654 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1655 break; 1656 } 1657 1658 case ABIArgInfo::Expand: 1659 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1660 getExpandedTypes(it->type, ArgTypesIter); 1661 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1662 break; 1663 } 1664 } 1665 1666 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; 1667 assert(Erased && "Not in set?"); 1668 1669 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic()); 1670 } 1671 1672 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { 1673 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 1674 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 1675 1676 if (!isFuncTypeConvertible(FPT)) 1677 return llvm::StructType::get(getLLVMContext()); 1678 1679 const CGFunctionInfo *Info; 1680 if (isa<CXXDestructorDecl>(MD)) 1681 Info = 1682 &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType())); 1683 else 1684 Info = &arrangeCXXMethodDeclaration(MD); 1685 return GetFunctionType(*Info); 1686 } 1687 1688 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, 1689 llvm::AttrBuilder &FuncAttrs, 1690 const FunctionProtoType *FPT) { 1691 if (!FPT) 1692 return; 1693 1694 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && 1695 FPT->isNothrow()) 1696 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1697 } 1698 1699 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone, 1700 bool AttrOnCallSite, 1701 llvm::AttrBuilder &FuncAttrs) { 1702 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. 1703 if (!HasOptnone) { 1704 if (CodeGenOpts.OptimizeSize) 1705 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); 1706 if (CodeGenOpts.OptimizeSize == 2) 1707 FuncAttrs.addAttribute(llvm::Attribute::MinSize); 1708 } 1709 1710 if (CodeGenOpts.DisableRedZone) 1711 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); 1712 if (CodeGenOpts.IndirectTlsSegRefs) 1713 FuncAttrs.addAttribute("indirect-tls-seg-refs"); 1714 if (CodeGenOpts.NoImplicitFloat) 1715 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); 1716 1717 if (AttrOnCallSite) { 1718 // Attributes that should go on the call site only. 1719 if (!CodeGenOpts.SimplifyLibCalls || 1720 CodeGenOpts.isNoBuiltinFunc(Name.data())) 1721 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); 1722 if (!CodeGenOpts.TrapFuncName.empty()) 1723 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName); 1724 } else { 1725 // Attributes that should go on the function, but not the call site. 
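// As a rough, illustrative example of the string attributes produced below:
// a function built with frame-pointer elimination disabled and the default
// stack-protector buffer size would carry something like
//   "no-frame-pointer-elim"="true" "stack-protector-buffer-size"="8"
// (exact names and values depend on the CodeGenOptions in effect).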
1726 if (!CodeGenOpts.DisableFPElim) { 1727 FuncAttrs.addAttribute("no-frame-pointer-elim", "false"); 1728 } else if (CodeGenOpts.OmitLeafFramePointer) { 1729 FuncAttrs.addAttribute("no-frame-pointer-elim", "false"); 1730 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf"); 1731 } else { 1732 FuncAttrs.addAttribute("no-frame-pointer-elim", "true"); 1733 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf"); 1734 } 1735 1736 FuncAttrs.addAttribute("less-precise-fpmad", 1737 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD)); 1738 1739 if (CodeGenOpts.NullPointerIsValid) 1740 FuncAttrs.addAttribute("null-pointer-is-valid", "true"); 1741 if (!CodeGenOpts.FPDenormalMode.empty()) 1742 FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode); 1743 1744 FuncAttrs.addAttribute("no-trapping-math", 1745 llvm::toStringRef(CodeGenOpts.NoTrappingMath)); 1746 1747 // Strict (compliant) code is the default, so only add this attribute to 1748 // indicate that we are trying to workaround a problem case. 1749 if (!CodeGenOpts.StrictFloatCastOverflow) 1750 FuncAttrs.addAttribute("strict-float-cast-overflow", "false"); 1751 1752 // TODO: Are these all needed? 1753 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags. 1754 FuncAttrs.addAttribute("no-infs-fp-math", 1755 llvm::toStringRef(CodeGenOpts.NoInfsFPMath)); 1756 FuncAttrs.addAttribute("no-nans-fp-math", 1757 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath)); 1758 FuncAttrs.addAttribute("unsafe-fp-math", 1759 llvm::toStringRef(CodeGenOpts.UnsafeFPMath)); 1760 FuncAttrs.addAttribute("use-soft-float", 1761 llvm::toStringRef(CodeGenOpts.SoftFloat)); 1762 FuncAttrs.addAttribute("stack-protector-buffer-size", 1763 llvm::utostr(CodeGenOpts.SSPBufferSize)); 1764 FuncAttrs.addAttribute("no-signed-zeros-fp-math", 1765 llvm::toStringRef(CodeGenOpts.NoSignedZeros)); 1766 FuncAttrs.addAttribute( 1767 "correctly-rounded-divide-sqrt-fp-math", 1768 llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt)); 1769 1770 if (getLangOpts().OpenCL) 1771 FuncAttrs.addAttribute("denorms-are-zero", 1772 llvm::toStringRef(CodeGenOpts.FlushDenorm)); 1773 1774 // TODO: Reciprocal estimate codegen options should apply to instructions? 1775 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals; 1776 if (!Recips.empty()) 1777 FuncAttrs.addAttribute("reciprocal-estimates", 1778 llvm::join(Recips, ",")); 1779 1780 if (!CodeGenOpts.PreferVectorWidth.empty() && 1781 CodeGenOpts.PreferVectorWidth != "none") 1782 FuncAttrs.addAttribute("prefer-vector-width", 1783 CodeGenOpts.PreferVectorWidth); 1784 1785 if (CodeGenOpts.StackRealignment) 1786 FuncAttrs.addAttribute("stackrealign"); 1787 if (CodeGenOpts.Backchain) 1788 FuncAttrs.addAttribute("backchain"); 1789 1790 if (CodeGenOpts.SpeculativeLoadHardening) 1791 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening); 1792 } 1793 1794 if (getLangOpts().assumeFunctionsAreConvergent()) { 1795 // Conservatively, mark all functions and calls in CUDA and OpenCL as 1796 // convergent (meaning, they may call an intrinsically convergent op, such 1797 // as __syncthreads() / barrier(), and so can't have certain optimizations 1798 // applied around them). LLVM will remove this attribute where it safely 1799 // can. 1800 FuncAttrs.addAttribute(llvm::Attribute::Convergent); 1801 } 1802 1803 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { 1804 // Exceptions aren't supported in CUDA device code. 
1805 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1806 1807 // Respect -fcuda-flush-denormals-to-zero. 1808 if (CodeGenOpts.FlushDenorm) 1809 FuncAttrs.addAttribute("nvptx-f32ftz", "true"); 1810 } 1811 1812 for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) { 1813 StringRef Var, Value; 1814 std::tie(Var, Value) = Attr.split('='); 1815 FuncAttrs.addAttribute(Var, Value); 1816 } 1817 } 1818 1819 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) { 1820 llvm::AttrBuilder FuncAttrs; 1821 ConstructDefaultFnAttrList(F.getName(), 1822 F.hasFnAttribute(llvm::Attribute::OptimizeNone), 1823 /* AttrOnCallsite = */ false, FuncAttrs); 1824 F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs); 1825 } 1826 1827 void CodeGenModule::ConstructAttributeList( 1828 StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo, 1829 llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) { 1830 llvm::AttrBuilder FuncAttrs; 1831 llvm::AttrBuilder RetAttrs; 1832 1833 CallingConv = FI.getEffectiveCallingConvention(); 1834 if (FI.isNoReturn()) 1835 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1836 1837 // If we have information about the function prototype, we can learn 1838 // attributes from there. 1839 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs, 1840 CalleeInfo.getCalleeFunctionProtoType()); 1841 1842 const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl(); 1843 1844 bool HasOptnone = false; 1845 // FIXME: handle sseregparm someday... 1846 if (TargetDecl) { 1847 if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) 1848 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice); 1849 if (TargetDecl->hasAttr<NoThrowAttr>()) 1850 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1851 if (TargetDecl->hasAttr<NoReturnAttr>()) 1852 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1853 if (TargetDecl->hasAttr<ColdAttr>()) 1854 FuncAttrs.addAttribute(llvm::Attribute::Cold); 1855 if (TargetDecl->hasAttr<NoDuplicateAttr>()) 1856 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate); 1857 if (TargetDecl->hasAttr<ConvergentAttr>()) 1858 FuncAttrs.addAttribute(llvm::Attribute::Convergent); 1859 1860 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { 1861 AddAttributesFromFunctionProtoType( 1862 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>()); 1863 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function. 1864 // These attributes are not inherited by overloads. 1865 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn); 1866 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual())) 1867 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1868 } 1869 1870 // 'const', 'pure' and 'noalias' attributed functions are also nounwind. 
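// For example (illustrative only):
//   int square(int) __attribute__((const));          // -> readnone + nounwind
//   int length(const char *) __attribute__((pure));  // -> readonly + nounwind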
1871     if (TargetDecl->hasAttr<ConstAttr>()) {
1872       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1873       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1874     } else if (TargetDecl->hasAttr<PureAttr>()) {
1875       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1876       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1877     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1878       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1879       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1880     }
1881     if (TargetDecl->hasAttr<RestrictAttr>())
1882       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1883     if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1884         !CodeGenOpts.NullPointerIsValid)
1885       RetAttrs.addAttribute(llvm::Attribute::NonNull);
1886     if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1887       FuncAttrs.addAttribute("no_caller_saved_registers");
1888     if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1889       FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1890
1891     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1892     if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1893       Optional<unsigned> NumElemsParam;
1894       if (AllocSize->getNumElemsParam().isValid())
1895         NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1896       FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1897                                  NumElemsParam);
1898     }
1899   }
1900
1901   ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1902
1903   // This must run after constructing the default function attribute list
1904   // to ensure that the speculative load hardening attribute is removed
1905   // in the case where the -mspeculative-load-hardening flag was passed.
1906   if (TargetDecl) {
1907     if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
1908       FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
1909     if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
1910       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1911   }
1912
1913   if (CodeGenOpts.EnableSegmentedStacks &&
1914       !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1915     FuncAttrs.addAttribute("split-stack");
1916
1917   // Add NonLazyBind attribute to function declarations when -fno-plt
1918   // is used.
1919   if (TargetDecl && CodeGenOpts.NoPLT) {
1920     if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1921       if (!Fn->isDefined() && !AttrOnCallSite) {
1922         FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1923       }
1924     }
1925   }
1926
1927   if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1928     if (getLangOpts().OpenCLVersion <= 120) {
1929       // OpenCL v1.2: work groups are always uniform.
1930       FuncAttrs.addAttribute("uniform-work-group-size", "true");
1931     } else {
1932       // OpenCL v2.0: work groups may or may not be uniform. The
1933       // '-cl-uniform-work-group-size' compile option is a hint to the
1934       // compiler that the global work-size is a multiple of the
1935       // work-group size specified to clEnqueueNDRangeKernel
1936       // (i.e. work groups are uniform).
1937 FuncAttrs.addAttribute("uniform-work-group-size", 1938 llvm::toStringRef(CodeGenOpts.UniformWGSize)); 1939 } 1940 } 1941 1942 if (!AttrOnCallSite) { 1943 bool DisableTailCalls = false; 1944 1945 if (CodeGenOpts.DisableTailCalls) 1946 DisableTailCalls = true; 1947 else if (TargetDecl) { 1948 if (TargetDecl->hasAttr<DisableTailCallsAttr>() || 1949 TargetDecl->hasAttr<AnyX86InterruptAttr>()) 1950 DisableTailCalls = true; 1951 else if (CodeGenOpts.NoEscapingBlockTailCalls) { 1952 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl)) 1953 if (!BD->doesNotEscape()) 1954 DisableTailCalls = true; 1955 } 1956 } 1957 1958 FuncAttrs.addAttribute("disable-tail-calls", 1959 llvm::toStringRef(DisableTailCalls)); 1960 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs); 1961 } 1962 1963 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); 1964 1965 QualType RetTy = FI.getReturnType(); 1966 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1967 switch (RetAI.getKind()) { 1968 case ABIArgInfo::Extend: 1969 if (RetAI.isSignExt()) 1970 RetAttrs.addAttribute(llvm::Attribute::SExt); 1971 else 1972 RetAttrs.addAttribute(llvm::Attribute::ZExt); 1973 LLVM_FALLTHROUGH; 1974 case ABIArgInfo::Direct: 1975 if (RetAI.getInReg()) 1976 RetAttrs.addAttribute(llvm::Attribute::InReg); 1977 break; 1978 case ABIArgInfo::Ignore: 1979 break; 1980 1981 case ABIArgInfo::InAlloca: 1982 case ABIArgInfo::Indirect: { 1983 // inalloca and sret disable readnone and readonly 1984 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1985 .removeAttribute(llvm::Attribute::ReadNone); 1986 break; 1987 } 1988 1989 case ABIArgInfo::CoerceAndExpand: 1990 break; 1991 1992 case ABIArgInfo::Expand: 1993 llvm_unreachable("Invalid ABI kind for return argument"); 1994 } 1995 1996 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { 1997 QualType PTy = RefTy->getPointeeType(); 1998 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 1999 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 2000 .getQuantity()); 2001 else if (getContext().getTargetAddressSpace(PTy) == 0 && 2002 !CodeGenOpts.NullPointerIsValid) 2003 RetAttrs.addAttribute(llvm::Attribute::NonNull); 2004 } 2005 2006 bool hasUsedSRet = false; 2007 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs()); 2008 2009 // Attach attributes to sret. 2010 if (IRFunctionArgs.hasSRetArg()) { 2011 llvm::AttrBuilder SRETAttrs; 2012 if (!RetAI.getSuppressSRet()) 2013 SRETAttrs.addAttribute(llvm::Attribute::StructRet); 2014 hasUsedSRet = true; 2015 if (RetAI.getInReg()) 2016 SRETAttrs.addAttribute(llvm::Attribute::InReg); 2017 ArgAttrs[IRFunctionArgs.getSRetArgNo()] = 2018 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs); 2019 } 2020 2021 // Attach attributes to inalloca argument. 2022 if (IRFunctionArgs.hasInallocaArg()) { 2023 llvm::AttrBuilder Attrs; 2024 Attrs.addAttribute(llvm::Attribute::InAlloca); 2025 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] = 2026 llvm::AttributeSet::get(getLLVMContext(), Attrs); 2027 } 2028 2029 unsigned ArgNo = 0; 2030 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 2031 E = FI.arg_end(); 2032 I != E; ++I, ++ArgNo) { 2033 QualType ParamType = I->type; 2034 const ABIArgInfo &AI = I->info; 2035 llvm::AttrBuilder Attrs; 2036 2037 // Add attribute for padding argument, if necessary. 
2038 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 2039 if (AI.getPaddingInReg()) { 2040 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 2041 llvm::AttributeSet::get( 2042 getLLVMContext(), 2043 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg)); 2044 } 2045 } 2046 2047 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 2048 // have the corresponding parameter variable. It doesn't make 2049 // sense to do it here because parameters are so messed up. 2050 switch (AI.getKind()) { 2051 case ABIArgInfo::Extend: 2052 if (AI.isSignExt()) 2053 Attrs.addAttribute(llvm::Attribute::SExt); 2054 else 2055 Attrs.addAttribute(llvm::Attribute::ZExt); 2056 LLVM_FALLTHROUGH; 2057 case ABIArgInfo::Direct: 2058 if (ArgNo == 0 && FI.isChainCall()) 2059 Attrs.addAttribute(llvm::Attribute::Nest); 2060 else if (AI.getInReg()) 2061 Attrs.addAttribute(llvm::Attribute::InReg); 2062 break; 2063 2064 case ABIArgInfo::Indirect: { 2065 if (AI.getInReg()) 2066 Attrs.addAttribute(llvm::Attribute::InReg); 2067 2068 if (AI.getIndirectByVal()) 2069 Attrs.addAttribute(llvm::Attribute::ByVal); 2070 2071 CharUnits Align = AI.getIndirectAlign(); 2072 2073 // In a byval argument, it is important that the required 2074 // alignment of the type is honored, as LLVM might be creating a 2075 // *new* stack object, and needs to know what alignment to give 2076 // it. (Sometimes it can deduce a sensible alignment on its own, 2077 // but not if clang decides it must emit a packed struct, or the 2078 // user specifies increased alignment requirements.) 2079 // 2080 // This is different from indirect *not* byval, where the object 2081 // exists already, and the align attribute is purely 2082 // informative. 2083 assert(!Align.isZero()); 2084 2085 // For now, only add this when we have a byval argument. 2086 // TODO: be less lazy about updating test cases. 2087 if (AI.getIndirectByVal()) 2088 Attrs.addAlignmentAttr(Align.getQuantity()); 2089 2090 // byval disables readnone and readonly. 2091 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2092 .removeAttribute(llvm::Attribute::ReadNone); 2093 break; 2094 } 2095 case ABIArgInfo::Ignore: 2096 case ABIArgInfo::Expand: 2097 case ABIArgInfo::CoerceAndExpand: 2098 break; 2099 2100 case ABIArgInfo::InAlloca: 2101 // inalloca disables readnone and readonly. 2102 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2103 .removeAttribute(llvm::Attribute::ReadNone); 2104 continue; 2105 } 2106 2107 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { 2108 QualType PTy = RefTy->getPointeeType(); 2109 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 2110 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 2111 .getQuantity()); 2112 else if (getContext().getTargetAddressSpace(PTy) == 0 && 2113 !CodeGenOpts.NullPointerIsValid) 2114 Attrs.addAttribute(llvm::Attribute::NonNull); 2115 } 2116 2117 switch (FI.getExtParameterInfo(ArgNo).getABI()) { 2118 case ParameterABI::Ordinary: 2119 break; 2120 2121 case ParameterABI::SwiftIndirectResult: { 2122 // Add 'sret' if we haven't already used it for something, but 2123 // only if the result is void. 2124 if (!hasUsedSRet && RetTy->isVoidType()) { 2125 Attrs.addAttribute(llvm::Attribute::StructRet); 2126 hasUsedSRet = true; 2127 } 2128 2129 // Add 'noalias' in either case. 2130 Attrs.addAttribute(llvm::Attribute::NoAlias); 2131 2132 // Add 'dereferenceable' and 'alignment'. 
2133 auto PTy = ParamType->getPointeeType(); 2134 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { 2135 auto info = getContext().getTypeInfoInChars(PTy); 2136 Attrs.addDereferenceableAttr(info.first.getQuantity()); 2137 Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(), 2138 info.second.getQuantity())); 2139 } 2140 break; 2141 } 2142 2143 case ParameterABI::SwiftErrorResult: 2144 Attrs.addAttribute(llvm::Attribute::SwiftError); 2145 break; 2146 2147 case ParameterABI::SwiftContext: 2148 Attrs.addAttribute(llvm::Attribute::SwiftSelf); 2149 break; 2150 } 2151 2152 if (FI.getExtParameterInfo(ArgNo).isNoEscape()) 2153 Attrs.addAttribute(llvm::Attribute::NoCapture); 2154 2155 if (Attrs.hasAttributes()) { 2156 unsigned FirstIRArg, NumIRArgs; 2157 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2158 for (unsigned i = 0; i < NumIRArgs; i++) 2159 ArgAttrs[FirstIRArg + i] = 2160 llvm::AttributeSet::get(getLLVMContext(), Attrs); 2161 } 2162 } 2163 assert(ArgNo == FI.arg_size()); 2164 2165 AttrList = llvm::AttributeList::get( 2166 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs), 2167 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs); 2168 } 2169 2170 /// An argument came in as a promoted argument; demote it back to its 2171 /// declared type. 2172 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 2173 const VarDecl *var, 2174 llvm::Value *value) { 2175 llvm::Type *varType = CGF.ConvertType(var->getType()); 2176 2177 // This can happen with promotions that actually don't change the 2178 // underlying type, like the enum promotions. 2179 if (value->getType() == varType) return value; 2180 2181 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 2182 && "unexpected promotion type"); 2183 2184 if (isa<llvm::IntegerType>(varType)) 2185 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 2186 2187 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 2188 } 2189 2190 /// Returns the attribute (either parameter attribute, or function 2191 /// attribute), which declares argument ArgNo to be non-null. 2192 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, 2193 QualType ArgType, unsigned ArgNo) { 2194 // FIXME: __attribute__((nonnull)) can also be applied to: 2195 // - references to pointers, where the pointee is known to be 2196 // nonnull (apparently a Clang extension) 2197 // - transparent unions containing pointers 2198 // In the former case, LLVM IR cannot represent the constraint. In 2199 // the latter case, we have no guarantee that the transparent union 2200 // is in fact passed as a pointer. 2201 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) 2202 return nullptr; 2203 // First, check attribute on parameter itself. 2204 if (PVD) { 2205 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) 2206 return ParmNNAttr; 2207 } 2208 // Check function attributes. 
2209   if (!FD)
2210     return nullptr;
2211   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2212     if (NNAttr->isNonNull(ArgNo))
2213       return NNAttr;
2214   }
2215   return nullptr;
2216 }
2217
2218 namespace {
2219   struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2220     Address Temp;
2221     Address Arg;
2222     CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2223     void Emit(CodeGenFunction &CGF, Flags flags) override {
2224       llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2225       CGF.Builder.CreateStore(errorValue, Arg);
2226     }
2227   };
2228 }
2229
2230 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2231                                          llvm::Function *Fn,
2232                                          const FunctionArgList &Args) {
2233   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2234     // Naked functions don't have prologues.
2235     return;
2236
2237   // If this is an implicit-return-zero function, go ahead and
2238   // initialize the return value. TODO: it might be nice to have
2239   // a more general mechanism for this that didn't require synthesized
2240   // return statements.
2241   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2242     if (FD->hasImplicitReturnZero()) {
2243       QualType RetTy = FD->getReturnType().getUnqualifiedType();
2244       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2245       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2246       Builder.CreateStore(Zero, ReturnValue);
2247     }
2248   }
2249
2250   // FIXME: We no longer need the types from FunctionArgList; lift up and
2251   // simplify.
2252
2253   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2254   // Flattened function arguments.
2255   SmallVector<llvm::Value *, 16> FnArgs;
2256   FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2257   for (auto &Arg : Fn->args()) {
2258     FnArgs.push_back(&Arg);
2259   }
2260   assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2261
2262   // If we're using inalloca, all the memory arguments are GEPs off of the last
2263   // parameter, which is a pointer to the complete memory area.
2264   Address ArgStruct = Address::invalid();
2265   const llvm::StructLayout *ArgStructLayout = nullptr;
2266   if (IRFunctionArgs.hasInallocaArg()) {
2267     ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2268     ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2269                         FI.getArgStructAlignment());
2270
2271     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2272   }
2273
2274   // Name the struct return parameter.
2275   if (IRFunctionArgs.hasSRetArg()) {
2276     auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2277     AI->setName("agg.result");
2278     AI->addAttr(llvm::Attribute::NoAlias);
2279   }
2280
2281   // Track if we received the parameter as a pointer (indirect, byval, or
2282   // inalloca). If we already have a pointer, EmitParmDecl doesn't need to
2283   // copy it into a local alloca for us.
2284   SmallVector<ParamValue, 16> ArgVals;
2285   ArgVals.reserve(Args.size());
2286
2287   // Create a pointer value for every parameter declaration. This usually
2288   // entails copying one or more LLVM IR arguments into an alloca. Don't push
2289   // any cleanups or do anything that might unwind. We do that separately, so
2290   // we can push the cleanups in the correct order for the ABI.
2291 assert(FI.arg_size() == Args.size() && 2292 "Mismatch between function signature & arguments."); 2293 unsigned ArgNo = 0; 2294 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); 2295 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 2296 i != e; ++i, ++info_it, ++ArgNo) { 2297 const VarDecl *Arg = *i; 2298 const ABIArgInfo &ArgI = info_it->info; 2299 2300 bool isPromoted = 2301 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted(); 2302 // We are converting from ABIArgInfo type to VarDecl type directly, unless 2303 // the parameter is promoted. In this case we convert to 2304 // CGFunctionInfo::ArgInfo type with subsequent argument demotion. 2305 QualType Ty = isPromoted ? info_it->type : Arg->getType(); 2306 assert(hasScalarEvaluationKind(Ty) == 2307 hasScalarEvaluationKind(Arg->getType())); 2308 2309 unsigned FirstIRArg, NumIRArgs; 2310 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2311 2312 switch (ArgI.getKind()) { 2313 case ABIArgInfo::InAlloca: { 2314 assert(NumIRArgs == 0); 2315 auto FieldIndex = ArgI.getInAllocaFieldIndex(); 2316 CharUnits FieldOffset = 2317 CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex)); 2318 Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset, 2319 Arg->getName()); 2320 ArgVals.push_back(ParamValue::forIndirect(V)); 2321 break; 2322 } 2323 2324 case ABIArgInfo::Indirect: { 2325 assert(NumIRArgs == 1); 2326 Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign()); 2327 2328 if (!hasScalarEvaluationKind(Ty)) { 2329 // Aggregates and complex variables are accessed by reference. All we 2330 // need to do is realign the value, if requested. 2331 Address V = ParamAddr; 2332 if (ArgI.getIndirectRealign()) { 2333 Address AlignedTemp = CreateMemTemp(Ty, "coerce"); 2334 2335 // Copy from the incoming argument pointer to the temporary with the 2336 // appropriate alignment. 2337 // 2338 // FIXME: We should have a common utility for generating an aggregate 2339 // copy. 2340 CharUnits Size = getContext().getTypeSizeInChars(Ty); 2341 auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()); 2342 Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy); 2343 Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy); 2344 Builder.CreateMemCpy(Dst, Src, SizeVal, false); 2345 V = AlignedTemp; 2346 } 2347 ArgVals.push_back(ParamValue::forIndirect(V)); 2348 } else { 2349 // Load scalar value from indirect argument. 2350 llvm::Value *V = 2351 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc()); 2352 2353 if (isPromoted) 2354 V = emitArgumentDemotion(*this, Arg, V); 2355 ArgVals.push_back(ParamValue::forDirect(V)); 2356 } 2357 break; 2358 } 2359 2360 case ABIArgInfo::Extend: 2361 case ABIArgInfo::Direct: { 2362 2363 // If we have the trivial case, handle it with no muss and fuss. 
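// (The "trivial" case, roughly: a scalar whose coercion type already matches
// its converted Clang type and has no direct offset, e.g. a plain 'int'
// arriving as a single i32. The incoming LLVM argument can then be used
// directly, after any attribute decoration below, without spilling it to an
// alloca here.)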
2364 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 2365 ArgI.getCoerceToType() == ConvertType(Ty) && 2366 ArgI.getDirectOffset() == 0) { 2367 assert(NumIRArgs == 1); 2368 llvm::Value *V = FnArgs[FirstIRArg]; 2369 auto AI = cast<llvm::Argument>(V); 2370 2371 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) { 2372 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), 2373 PVD->getFunctionScopeIndex()) && 2374 !CGM.getCodeGenOpts().NullPointerIsValid) 2375 AI->addAttr(llvm::Attribute::NonNull); 2376 2377 QualType OTy = PVD->getOriginalType(); 2378 if (const auto *ArrTy = 2379 getContext().getAsConstantArrayType(OTy)) { 2380 // A C99 array parameter declaration with the static keyword also 2381 // indicates dereferenceability, and if the size is constant we can 2382 // use the dereferenceable attribute (which requires the size in 2383 // bytes). 2384 if (ArrTy->getSizeModifier() == ArrayType::Static) { 2385 QualType ETy = ArrTy->getElementType(); 2386 uint64_t ArrSize = ArrTy->getSize().getZExtValue(); 2387 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && 2388 ArrSize) { 2389 llvm::AttrBuilder Attrs; 2390 Attrs.addDereferenceableAttr( 2391 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize); 2392 AI->addAttrs(Attrs); 2393 } else if (getContext().getTargetAddressSpace(ETy) == 0 && 2394 !CGM.getCodeGenOpts().NullPointerIsValid) { 2395 AI->addAttr(llvm::Attribute::NonNull); 2396 } 2397 } 2398 } else if (const auto *ArrTy = 2399 getContext().getAsVariableArrayType(OTy)) { 2400 // For C99 VLAs with the static keyword, we don't know the size so 2401 // we can't use the dereferenceable attribute, but in addrspace(0) 2402 // we know that it must be nonnull. 2403 if (ArrTy->getSizeModifier() == VariableArrayType::Static && 2404 !getContext().getTargetAddressSpace(ArrTy->getElementType()) && 2405 !CGM.getCodeGenOpts().NullPointerIsValid) 2406 AI->addAttr(llvm::Attribute::NonNull); 2407 } 2408 2409 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); 2410 if (!AVAttr) 2411 if (const auto *TOTy = dyn_cast<TypedefType>(OTy)) 2412 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); 2413 if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) { 2414 // If alignment-assumption sanitizer is enabled, we do *not* add 2415 // alignment attribute here, but emit normal alignment assumption, 2416 // so the UBSAN check could function. 2417 llvm::Value *AlignmentValue = 2418 EmitScalarExpr(AVAttr->getAlignment()); 2419 llvm::ConstantInt *AlignmentCI = 2420 cast<llvm::ConstantInt>(AlignmentValue); 2421 unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(), 2422 +llvm::Value::MaximumAlignment); 2423 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment)); 2424 } 2425 } 2426 2427 if (Arg->getType().isRestrictQualified()) 2428 AI->addAttr(llvm::Attribute::NoAlias); 2429 2430 // LLVM expects swifterror parameters to be used in very restricted 2431 // ways. Copy the value into a less-restricted temporary. 2432 if (FI.getExtParameterInfo(ArgNo).getABI() 2433 == ParameterABI::SwiftErrorResult) { 2434 QualType pointeeTy = Ty->getPointeeType(); 2435 assert(pointeeTy->isPointerType()); 2436 Address temp = 2437 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 2438 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy)); 2439 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); 2440 Builder.CreateStore(incomingErrorValue, temp); 2441 V = temp.getPointer(); 2442 2443 // Push a cleanup to copy the value back at the end of the function. 
2444 // The convention does not guarantee that the value will be written 2445 // back if the function exits with an unwind exception. 2446 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg); 2447 } 2448 2449 // Ensure the argument is the correct type. 2450 if (V->getType() != ArgI.getCoerceToType()) 2451 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 2452 2453 if (isPromoted) 2454 V = emitArgumentDemotion(*this, Arg, V); 2455 2456 // Because of merging of function types from multiple decls it is 2457 // possible for the type of an argument to not match the corresponding 2458 // type in the function type. Since we are codegening the callee 2459 // in here, add a cast to the argument type. 2460 llvm::Type *LTy = ConvertType(Arg->getType()); 2461 if (V->getType() != LTy) 2462 V = Builder.CreateBitCast(V, LTy); 2463 2464 ArgVals.push_back(ParamValue::forDirect(V)); 2465 break; 2466 } 2467 2468 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), 2469 Arg->getName()); 2470 2471 // Pointer to store into. 2472 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI); 2473 2474 // Fast-isel and the optimizer generally like scalar values better than 2475 // FCAs, so we flatten them if this is safe to do for this argument. 2476 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 2477 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && 2478 STy->getNumElements() > 1) { 2479 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy); 2480 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 2481 llvm::Type *DstTy = Ptr.getElementType(); 2482 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 2483 2484 Address AddrToStoreInto = Address::invalid(); 2485 if (SrcSize <= DstSize) { 2486 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy); 2487 } else { 2488 AddrToStoreInto = 2489 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce"); 2490 } 2491 2492 assert(STy->getNumElements() == NumIRArgs); 2493 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2494 auto AI = FnArgs[FirstIRArg + i]; 2495 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 2496 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i)); 2497 Address EltPtr = 2498 Builder.CreateStructGEP(AddrToStoreInto, i, Offset); 2499 Builder.CreateStore(AI, EltPtr); 2500 } 2501 2502 if (SrcSize > DstSize) { 2503 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize); 2504 } 2505 2506 } else { 2507 // Simple case, just do a coerced store of the argument into the alloca. 2508 assert(NumIRArgs == 1); 2509 auto AI = FnArgs[FirstIRArg]; 2510 AI->setName(Arg->getName() + ".coerce"); 2511 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this); 2512 } 2513 2514 // Match to what EmitParmDecl is expecting for this type. 2515 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 2516 llvm::Value *V = 2517 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc()); 2518 if (isPromoted) 2519 V = emitArgumentDemotion(*this, Arg, V); 2520 ArgVals.push_back(ParamValue::forDirect(V)); 2521 } else { 2522 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2523 } 2524 break; 2525 } 2526 2527 case ABIArgInfo::CoerceAndExpand: { 2528 // Reconstruct into a temporary. 
2529       Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2530       ArgVals.push_back(ParamValue::forIndirect(alloca));
2531
2532       auto coercionType = ArgI.getCoerceAndExpandType();
2533       alloca = Builder.CreateElementBitCast(alloca, coercionType);
2534       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2535
2536       unsigned argIndex = FirstIRArg;
2537       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2538         llvm::Type *eltType = coercionType->getElementType(i);
2539         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2540           continue;
2541
2542         auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2543         auto elt = FnArgs[argIndex++];
2544         Builder.CreateStore(elt, eltAddr);
2545       }
2546       assert(argIndex == FirstIRArg + NumIRArgs);
2547       break;
2548     }
2549
2550     case ABIArgInfo::Expand: {
2551       // If this structure was expanded into multiple arguments then
2552       // we need to create a temporary and reconstruct it from the
2553       // arguments.
2554       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2555       LValue LV = MakeAddrLValue(Alloca, Ty);
2556       ArgVals.push_back(ParamValue::forIndirect(Alloca));
2557
2558       auto FnArgIter = FnArgs.begin() + FirstIRArg;
2559       ExpandTypeFromArgs(Ty, LV, FnArgIter);
2560       assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2561       for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2562         auto AI = FnArgs[FirstIRArg + i];
2563         AI->setName(Arg->getName() + "." + Twine(i));
2564       }
2565       break;
2566     }
2567
2568     case ABIArgInfo::Ignore:
2569       assert(NumIRArgs == 0);
2570       // Initialize the local variable appropriately.
2571       if (!hasScalarEvaluationKind(Ty)) {
2572         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2573       } else {
2574         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2575         ArgVals.push_back(ParamValue::forDirect(U));
2576       }
2577       break;
2578     }
2579   }
2580
2581   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2582     for (int I = Args.size() - 1; I >= 0; --I)
2583       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2584   } else {
2585     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2586       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2587   }
2588 }
2589
2590 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2591   while (insn->use_empty()) {
2592     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2593     if (!bitcast) return;
2594
2595     // This is "safe" because we would have used a ConstantExpr otherwise.
2596     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2597     bitcast->eraseFromParent();
2598   }
2599 }
2600
2601 /// Try to emit a fused autorelease of a return result.
2602 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2603                                                     llvm::Value *result) {
2604   // We must be emitting immediately after the cast.
2605   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2606   if (BB->empty()) return nullptr;
2607   if (&BB->back() != result) return nullptr;
2608
2609   llvm::Type *resultType = result->getType();
2610
2611   // result is in a BasicBlock and is therefore an Instruction.
2612   llvm::Instruction *generator = cast<llvm::Instruction>(result);
2613
2614   SmallVector<llvm::Instruction *, 4> InstsToKill;
2615
2616   // Look for:
2617   //   %generator = bitcast %type1* %generator2 to %type2*
2618   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2619     // We would have emitted this as a constant if the operand weren't
2620     // an Instruction.
2621 generator = cast<llvm::Instruction>(bitcast->getOperand(0)); 2622 2623 // Require the generator to be immediately followed by the cast. 2624 if (generator->getNextNode() != bitcast) 2625 return nullptr; 2626 2627 InstsToKill.push_back(bitcast); 2628 } 2629 2630 // Look for: 2631 // %generator = call i8* @objc_retain(i8* %originalResult) 2632 // or 2633 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult) 2634 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator); 2635 if (!call) return nullptr; 2636 2637 bool doRetainAutorelease; 2638 2639 if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) { 2640 doRetainAutorelease = true; 2641 } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints() 2642 .objc_retainAutoreleasedReturnValue) { 2643 doRetainAutorelease = false; 2644 2645 // If we emitted an assembly marker for this call (and the 2646 // ARCEntrypoints field should have been set if so), go looking 2647 // for that call. If we can't find it, we can't do this 2648 // optimization. But it should always be the immediately previous 2649 // instruction, unless we needed bitcasts around the call. 2650 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { 2651 llvm::Instruction *prev = call->getPrevNode(); 2652 assert(prev); 2653 if (isa<llvm::BitCastInst>(prev)) { 2654 prev = prev->getPrevNode(); 2655 assert(prev); 2656 } 2657 assert(isa<llvm::CallInst>(prev)); 2658 assert(cast<llvm::CallInst>(prev)->getCalledValue() == 2659 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); 2660 InstsToKill.push_back(prev); 2661 } 2662 } else { 2663 return nullptr; 2664 } 2665 2666 result = call->getArgOperand(0); 2667 InstsToKill.push_back(call); 2668 2669 // Keep killing bitcasts, for sanity. Note that we no longer care 2670 // about precise ordering as long as there's exactly one use. 2671 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 2672 if (!bitcast->hasOneUse()) break; 2673 InstsToKill.push_back(bitcast); 2674 result = bitcast->getOperand(0); 2675 } 2676 2677 // Delete all the unnecessary instructions, from latest to earliest. 2678 for (auto *I : InstsToKill) 2679 I->eraseFromParent(); 2680 2681 // Do the fused retain/autorelease if we were asked to. 2682 if (doRetainAutorelease) 2683 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 2684 2685 // Cast back to the result type. 2686 return CGF.Builder.CreateBitCast(result, resultType); 2687 } 2688 2689 /// If this is a +1 of the value of an immutable 'self', remove it. 2690 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 2691 llvm::Value *result) { 2692 // This is only applicable to a method with an immutable 'self'. 2693 const ObjCMethodDecl *method = 2694 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 2695 if (!method) return nullptr; 2696 const VarDecl *self = method->getSelfDecl(); 2697 if (!self->getType().isConstQualified()) return nullptr; 2698 2699 // Look for a retain call. 2700 llvm::CallInst *retainCall = 2701 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 2702 if (!retainCall || 2703 retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain) 2704 return nullptr; 2705 2706 // Look for an ordinary load of 'self'. 
2707 llvm::Value *retainedValue = retainCall->getArgOperand(0); 2708 llvm::LoadInst *load = 2709 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 2710 if (!load || load->isAtomic() || load->isVolatile() || 2711 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) 2712 return nullptr; 2713 2714 // Okay! Burn it all down. This relies for correctness on the 2715 // assumption that the retain is emitted as part of the return and 2716 // that thereafter everything is used "linearly". 2717 llvm::Type *resultType = result->getType(); 2718 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 2719 assert(retainCall->use_empty()); 2720 retainCall->eraseFromParent(); 2721 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 2722 2723 return CGF.Builder.CreateBitCast(load, resultType); 2724 } 2725 2726 /// Emit an ARC autorelease of the result of a function. 2727 /// 2728 /// \return the value to actually return from the function 2729 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 2730 llvm::Value *result) { 2731 // If we're returning 'self', kill the initial retain. This is a 2732 // heuristic attempt to "encourage correctness" in the really unfortunate 2733 // case where we have a return of self during a dealloc and we desperately 2734 // need to avoid the possible autorelease. 2735 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result)) 2736 return self; 2737 2738 // At -O0, try to emit a fused retain/autorelease. 2739 if (CGF.shouldUseFusedARCCalls()) 2740 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result)) 2741 return fused; 2742 2743 return CGF.EmitARCAutoreleaseReturnValue(result); 2744 } 2745 2746 /// Heuristically search for a dominating store to the return-value slot. 2747 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { 2748 // Check if a User is a store which pointerOperand is the ReturnValue. 2749 // We are looking for stores to the ReturnValue, not for stores of the 2750 // ReturnValue to some other location. 2751 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * { 2752 auto *SI = dyn_cast<llvm::StoreInst>(U); 2753 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer()) 2754 return nullptr; 2755 // These aren't actually possible for non-coerced returns, and we 2756 // only care about non-coerced returns on this code path. 2757 assert(!SI->isAtomic() && !SI->isVolatile()); 2758 return SI; 2759 }; 2760 // If there are multiple uses of the return-value slot, just check 2761 // for something immediately preceding the IP. Sometimes this can 2762 // happen with how we generate implicit-returns; it can also happen 2763 // with noreturn cleanups. 
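// The pattern being matched is, schematically:
//   %v = ...
//   store <ty> %v, <ty>* %retval      ; the store we hope to fold away
//   <epilogue insertion point>
// so that the epilogue can return %v directly and usually drop the alloca.
// (Schematic IR only, for illustration.)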
2764   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2765     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2766     if (IP->empty()) return nullptr;
2767     llvm::Instruction *I = &IP->back();
2768
2769     // Skip lifetime markers.
2770     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2771                                             IE = IP->rend();
2772          II != IE; ++II) {
2773       if (llvm::IntrinsicInst *Intrinsic =
2774               dyn_cast<llvm::IntrinsicInst>(&*II)) {
2775         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2776           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2777           ++II;
2778           if (II == IE)
2779             break;
2780           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2781             continue;
2782         }
2783       }
2784       I = &*II;
2785       break;
2786     }
2787
2788     return GetStoreIfValid(I);
2789   }
2790
2791   llvm::StoreInst *store =
2792       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2793   if (!store) return nullptr;
2794
2795   // Now do a quick-and-dirty dominance check: just walk up the
2796   // single-predecessors chain from the current insertion point.
2797   llvm::BasicBlock *StoreBB = store->getParent();
2798   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2799   while (IP != StoreBB) {
2800     if (!(IP = IP->getSinglePredecessor()))
2801       return nullptr;
2802   }
2803
2804   // Okay, the store's basic block dominates the insertion point; we
2805   // can do our thing.
2806   return store;
2807 }
2808
2809 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2810                                          bool EmitRetDbgLoc,
2811                                          SourceLocation EndLoc) {
2812   if (FI.isNoReturn()) {
2813     // Noreturn functions don't return.
2814     EmitUnreachable(EndLoc);
2815     return;
2816   }
2817
2818   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2819     // Naked functions don't have epilogues.
2820     Builder.CreateUnreachable();
2821     return;
2822   }
2823
2824   // Functions with no result always return void.
2825   if (!ReturnValue.isValid()) {
2826     Builder.CreateRetVoid();
2827     return;
2828   }
2829
2830   llvm::DebugLoc RetDbgLoc;
2831   llvm::Value *RV = nullptr;
2832   QualType RetTy = FI.getReturnType();
2833   const ABIArgInfo &RetAI = FI.getReturnInfo();
2834
2835   switch (RetAI.getKind()) {
2836   case ABIArgInfo::InAlloca:
2837     // Aggregates get evaluated directly into the destination. Sometimes we
2838     // need to return the sret value in a register, though.
2839     assert(hasAggregateEvaluationKind(RetTy));
2840     if (RetAI.getInAllocaSRet()) {
2841       llvm::Function::arg_iterator EI = CurFn->arg_end();
2842       --EI;
2843       llvm::Value *ArgStruct = &*EI;
2844       llvm::Value *SRet = Builder.CreateStructGEP(
2845           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2846       RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2847     }
2848     break;
2849
2850   case ABIArgInfo::Indirect: {
2851     auto AI = CurFn->arg_begin();
2852     if (RetAI.isSRetAfterThis())
2853       ++AI;
2854     switch (getEvaluationKind(RetTy)) {
2855     case TEK_Complex: {
2856       ComplexPairTy RT =
2857           EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2858       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2859                          /*isInit*/ true);
2860       break;
2861     }
2862     case TEK_Aggregate:
2863       // Do nothing; aggregates get evaluated directly into the destination.
2864 break; 2865 case TEK_Scalar: 2866 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), 2867 MakeNaturalAlignAddrLValue(&*AI, RetTy), 2868 /*isInit*/ true); 2869 break; 2870 } 2871 break; 2872 } 2873 2874 case ABIArgInfo::Extend: 2875 case ABIArgInfo::Direct: 2876 if (RetAI.getCoerceToType() == ConvertType(RetTy) && 2877 RetAI.getDirectOffset() == 0) { 2878 // The internal return value temp always will have pointer-to-return-type 2879 // type, just do a load. 2880 2881 // If there is a dominating store to ReturnValue, we can elide 2882 // the load, zap the store, and usually zap the alloca. 2883 if (llvm::StoreInst *SI = 2884 findDominatingStoreToReturnValue(*this)) { 2885 // Reuse the debug location from the store unless there is 2886 // cleanup code to be emitted between the store and return 2887 // instruction. 2888 if (EmitRetDbgLoc && !AutoreleaseResult) 2889 RetDbgLoc = SI->getDebugLoc(); 2890 // Get the stored value and nuke the now-dead store. 2891 RV = SI->getValueOperand(); 2892 SI->eraseFromParent(); 2893 2894 // If that was the only use of the return value, nuke it as well now. 2895 auto returnValueInst = ReturnValue.getPointer(); 2896 if (returnValueInst->use_empty()) { 2897 if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) { 2898 alloca->eraseFromParent(); 2899 ReturnValue = Address::invalid(); 2900 } 2901 } 2902 2903 // Otherwise, we have to do a simple load. 2904 } else { 2905 RV = Builder.CreateLoad(ReturnValue); 2906 } 2907 } else { 2908 // If the value is offset in memory, apply the offset now. 2909 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI); 2910 2911 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this); 2912 } 2913 2914 // In ARC, end functions that return a retainable type with a call 2915 // to objc_autoreleaseReturnValue. 2916 if (AutoreleaseResult) { 2917 #ifndef NDEBUG 2918 // Type::isObjCRetainabletype has to be called on a QualType that hasn't 2919 // been stripped of the typedefs, so we cannot use RetTy here. Get the 2920 // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from 2921 // CurCodeDecl or BlockInfo. 2922 QualType RT; 2923 2924 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl)) 2925 RT = FD->getReturnType(); 2926 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl)) 2927 RT = MD->getReturnType(); 2928 else if (isa<BlockDecl>(CurCodeDecl)) 2929 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType(); 2930 else 2931 llvm_unreachable("Unexpected function/method type"); 2932 2933 assert(getLangOpts().ObjCAutoRefCount && 2934 !FI.isReturnsRetained() && 2935 RT->isObjCRetainableType()); 2936 #endif 2937 RV = emitAutoreleaseOfResult(*this, RV); 2938 } 2939 2940 break; 2941 2942 case ABIArgInfo::Ignore: 2943 break; 2944 2945 case ABIArgInfo::CoerceAndExpand: { 2946 auto coercionType = RetAI.getCoerceAndExpandType(); 2947 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 2948 2949 // Load all of the coerced elements out into results. 
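// Sketch of the CoerceAndExpand return path (illustrative): for a coercion
// type like { i64, [4 x i8] (padding), float }, the non-padding elements are
// loaded individually; a single element is returned as-is, while multiple
// elements are reassembled with insertvalue into the unpadded struct
// { i64, float } that the function actually returns.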
2950 llvm::SmallVector<llvm::Value*, 4> results; 2951 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType); 2952 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 2953 auto coercedEltType = coercionType->getElementType(i); 2954 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType)) 2955 continue; 2956 2957 auto eltAddr = Builder.CreateStructGEP(addr, i, layout); 2958 auto elt = Builder.CreateLoad(eltAddr); 2959 results.push_back(elt); 2960 } 2961 2962 // If we have one result, it's the single direct result type. 2963 if (results.size() == 1) { 2964 RV = results[0]; 2965 2966 // Otherwise, we need to make a first-class aggregate. 2967 } else { 2968 // Construct a return type that lacks padding elements. 2969 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType(); 2970 2971 RV = llvm::UndefValue::get(returnType); 2972 for (unsigned i = 0, e = results.size(); i != e; ++i) { 2973 RV = Builder.CreateInsertValue(RV, results[i], i); 2974 } 2975 } 2976 break; 2977 } 2978 2979 case ABIArgInfo::Expand: 2980 llvm_unreachable("Invalid ABI kind for return argument"); 2981 } 2982 2983 llvm::Instruction *Ret; 2984 if (RV) { 2985 EmitReturnValueCheck(RV); 2986 Ret = Builder.CreateRet(RV); 2987 } else { 2988 Ret = Builder.CreateRetVoid(); 2989 } 2990 2991 if (RetDbgLoc) 2992 Ret->setDebugLoc(std::move(RetDbgLoc)); 2993 } 2994 2995 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) { 2996 // A current decl may not be available when emitting vtable thunks. 2997 if (!CurCodeDecl) 2998 return; 2999 3000 ReturnsNonNullAttr *RetNNAttr = nullptr; 3001 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) 3002 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>(); 3003 3004 if (!RetNNAttr && !requiresReturnValueNullabilityCheck()) 3005 return; 3006 3007 // Prefer the returns_nonnull attribute if it's present. 3008 SourceLocation AttrLoc; 3009 SanitizerMask CheckKind; 3010 SanitizerHandler Handler; 3011 if (RetNNAttr) { 3012 assert(!requiresReturnValueNullabilityCheck() && 3013 "Cannot check nullability and the nonnull attribute"); 3014 AttrLoc = RetNNAttr->getLocation(); 3015 CheckKind = SanitizerKind::ReturnsNonnullAttribute; 3016 Handler = SanitizerHandler::NonnullReturn; 3017 } else { 3018 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl)) 3019 if (auto *TSI = DD->getTypeSourceInfo()) 3020 if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>()) 3021 AttrLoc = FTL.getReturnLoc().findNullabilityLoc(); 3022 CheckKind = SanitizerKind::NullabilityReturn; 3023 Handler = SanitizerHandler::NullabilityReturn; 3024 } 3025 3026 SanitizerScope SanScope(this); 3027 3028 // Make sure the "return" source location is valid. If we're checking a 3029 // nullability annotation, make sure the preconditions for the check are met. 3030 llvm::BasicBlock *Check = createBasicBlock("nullcheck"); 3031 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck"); 3032 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load"); 3033 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr); 3034 if (requiresReturnValueNullabilityCheck()) 3035 CanNullCheck = 3036 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition); 3037 Builder.CreateCondBr(CanNullCheck, Check, NoCheck); 3038 EmitBlock(Check); 3039 3040 // Now do the null check. 
3041 llvm::Value *Cond = Builder.CreateIsNotNull(RV); 3042 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)}; 3043 llvm::Value *DynamicData[] = {SLocPtr}; 3044 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData); 3045 3046 EmitBlock(NoCheck); 3047 3048 #ifndef NDEBUG 3049 // The return location should not be used after the check has been emitted. 3050 ReturnLocation = Address::invalid(); 3051 #endif 3052 } 3053 3054 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 3055 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 3056 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 3057 } 3058 3059 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, 3060 QualType Ty) { 3061 // FIXME: Generate IR in one pass, rather than going back and fixing up these 3062 // placeholders. 3063 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 3064 llvm::Type *IRPtrTy = IRTy->getPointerTo(); 3065 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo()); 3066 3067 // FIXME: When we generate this IR in one pass, we shouldn't need 3068 // this win32-specific alignment hack. 3069 CharUnits Align = CharUnits::fromQuantity(4); 3070 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align); 3071 3072 return AggValueSlot::forAddr(Address(Placeholder, Align), 3073 Ty.getQualifiers(), 3074 AggValueSlot::IsNotDestructed, 3075 AggValueSlot::DoesNotNeedGCBarriers, 3076 AggValueSlot::IsNotAliased, 3077 AggValueSlot::DoesNotOverlap); 3078 } 3079 3080 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 3081 const VarDecl *param, 3082 SourceLocation loc) { 3083 // StartFunction converted the ABI-lowered parameter(s) into a 3084 // local alloca. We need to turn that into an r-value suitable 3085 // for EmitCall. 3086 Address local = GetAddrOfLocalVar(param); 3087 3088 QualType type = param->getType(); 3089 3090 if (isInAllocaArgument(CGM.getCXXABI(), type)) { 3091 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter"); 3092 } 3093 3094 // GetAddrOfLocalVar returns a pointer-to-pointer for references, 3095 // but the argument needs to be the original pointer. 3096 if (type->isReferenceType()) { 3097 args.add(RValue::get(Builder.CreateLoad(local)), type); 3098 3099 // In ARC, move out of consumed arguments so that the release cleanup 3100 // entered by StartFunction doesn't cause an over-release. This isn't 3101 // optimal -O0 code generation, but it should get cleaned up when 3102 // optimization is enabled. This also assumes that delegate calls are 3103 // performed exactly once for a set of arguments, but that should be safe. 3104 } else if (getLangOpts().ObjCAutoRefCount && 3105 param->hasAttr<NSConsumedAttr>() && 3106 type->isObjCRetainableType()) { 3107 llvm::Value *ptr = Builder.CreateLoad(local); 3108 auto null = 3109 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType())); 3110 Builder.CreateStore(null, local); 3111 args.add(RValue::get(ptr), type); 3112 3113 // For the most part, we just need to load the alloca, except that 3114 // aggregate r-values are actually pointers to temporaries. 3115 } else { 3116 args.add(convertTempToRValue(local, type, loc), type); 3117 } 3118 3119 // Deactivate the cleanup for the callee-destructed param that was pushed. 
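  // This only matters for ABIs where by-value arguments are destroyed by the
  // callee (e.g. the Microsoft C++ ABI). A sketch (names are illustrative):
  //
  //   struct S { S(const S &); ~S(); };
  //   void target(S s);
  //   void thunk(S s) { target(s); }   // forwards 's' to 'target'
  //
  // StartFunction recorded a cleanup so that 'thunk', as a callee, destroys
  // its own parameter 's'; but once 's' has been forwarded, 'target' will
  // destroy that object, so also running the recorded cleanup would be a
  // double-destruction. Hence it is deactivated below.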
3120 if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk && 3121 type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee() && 3122 type.isDestructedType()) { 3123 EHScopeStack::stable_iterator cleanup = 3124 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param)); 3125 assert(cleanup.isValid() && 3126 "cleanup for callee-destructed param not recorded"); 3127 // This unreachable is a temporary marker which will be removed later. 3128 llvm::Instruction *isActive = Builder.CreateUnreachable(); 3129 args.addArgCleanupDeactivation(cleanup, isActive); 3130 } 3131 } 3132 3133 static bool isProvablyNull(llvm::Value *addr) { 3134 return isa<llvm::ConstantPointerNull>(addr); 3135 } 3136 3137 /// Emit the actual writing-back of a writeback. 3138 static void emitWriteback(CodeGenFunction &CGF, 3139 const CallArgList::Writeback &writeback) { 3140 const LValue &srcLV = writeback.Source; 3141 Address srcAddr = srcLV.getAddress(); 3142 assert(!isProvablyNull(srcAddr.getPointer()) && 3143 "shouldn't have writeback for provably null argument"); 3144 3145 llvm::BasicBlock *contBB = nullptr; 3146 3147 // If the argument wasn't provably non-null, we need to null check 3148 // before doing the store. 3149 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3150 CGF.CGM.getDataLayout()); 3151 if (!provablyNonNull) { 3152 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 3153 contBB = CGF.createBasicBlock("icr.done"); 3154 3155 llvm::Value *isNull = 3156 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3157 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 3158 CGF.EmitBlock(writebackBB); 3159 } 3160 3161 // Load the value to writeback. 3162 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 3163 3164 // Cast it back, in case we're writing an id to a Foo* or something. 3165 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(), 3166 "icr.writeback-cast"); 3167 3168 // Perform the writeback. 3169 3170 // If we have a "to use" value, it's something we need to emit a use 3171 // of. This has to be carefully threaded in: if it's done after the 3172 // release it's potentially undefined behavior (and the optimizer 3173 // will ignore it), and if it happens before the retain then the 3174 // optimizer could move the release there. 3175 if (writeback.ToUse) { 3176 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 3177 3178 // Retain the new value. No need to block-copy here: the block's 3179 // being passed up the stack. 3180 value = CGF.EmitARCRetainNonBlock(value); 3181 3182 // Emit the intrinsic use here. 3183 CGF.EmitARCIntrinsicUse(writeback.ToUse); 3184 3185 // Load the old value (primitively). 3186 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 3187 3188 // Put the new value in place (primitively). 3189 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 3190 3191 // Release the old value. 3192 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 3193 3194 // Otherwise, we can just do a normal lvalue store. 3195 } else { 3196 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 3197 } 3198 3199 // Jump to the continuation block. 
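  // Taken together, for a source address that might be null, the code emitted
  // above plus the continuation below has roughly this shape (a sketch, not
  // the exact IR):
  //
  //   if (src != nullptr) {        // icr.writeback
  //     T newVal = *temp;
  //     // with a ToUse value: EmitARCRetainNonBlock(newVal),
  //     // EmitARCIntrinsicUse(ToUse), store newVal, release the old value;
  //     *src = newVal;             // otherwise a plain lvalue store
  //   }
  //   // icr.done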
3200 if (!provablyNonNull) 3201 CGF.EmitBlock(contBB); 3202 } 3203 3204 static void emitWritebacks(CodeGenFunction &CGF, 3205 const CallArgList &args) { 3206 for (const auto &I : args.writebacks()) 3207 emitWriteback(CGF, I); 3208 } 3209 3210 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, 3211 const CallArgList &CallArgs) { 3212 ArrayRef<CallArgList::CallArgCleanup> Cleanups = 3213 CallArgs.getCleanupsToDeactivate(); 3214 // Iterate in reverse to increase the likelihood of popping the cleanup. 3215 for (const auto &I : llvm::reverse(Cleanups)) { 3216 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP); 3217 I.IsActiveIP->eraseFromParent(); 3218 } 3219 } 3220 3221 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { 3222 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) 3223 if (uop->getOpcode() == UO_AddrOf) 3224 return uop->getSubExpr(); 3225 return nullptr; 3226 } 3227 3228 /// Emit an argument that's being passed call-by-writeback. That is, 3229 /// we are passing the address of an __autoreleased temporary; it 3230 /// might be copy-initialized with the current value of the given 3231 /// address, but it will definitely be copied out of after the call. 3232 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, 3233 const ObjCIndirectCopyRestoreExpr *CRE) { 3234 LValue srcLV; 3235 3236 // Make an optimistic effort to emit the address as an l-value. 3237 // This can fail if the argument expression is more complicated. 3238 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { 3239 srcLV = CGF.EmitLValue(lvExpr); 3240 3241 // Otherwise, just emit it as a scalar. 3242 } else { 3243 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); 3244 3245 QualType srcAddrType = 3246 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); 3247 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); 3248 } 3249 Address srcAddr = srcLV.getAddress(); 3250 3251 // The dest and src types don't necessarily match in LLVM terms 3252 // because of the crazy ObjC compatibility rules. 3253 3254 llvm::PointerType *destType = 3255 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); 3256 3257 // If the address is a constant null, just pass the appropriate null. 3258 if (isProvablyNull(srcAddr.getPointer())) { 3259 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), 3260 CRE->getType()); 3261 return; 3262 } 3263 3264 // Create the temporary. 3265 Address temp = CGF.CreateTempAlloca(destType->getElementType(), 3266 CGF.getPointerAlign(), 3267 "icr.temp"); 3268 // Loading an l-value can introduce a cleanup if the l-value is __weak, 3269 // and that cleanup will be conditional if we can't prove that the l-value 3270 // isn't null, so we need to register a dominating point so that the cleanups 3271 // system will make valid IR. 3272 CodeGenFunction::ConditionalEvaluation condEval(CGF); 3273 3274 // Zero-initialize it if we're not doing a copy-initialization. 3275 bool shouldCopy = CRE->shouldCopy(); 3276 if (!shouldCopy) { 3277 llvm::Value *null = 3278 llvm::ConstantPointerNull::get( 3279 cast<llvm::PointerType>(destType->getElementType())); 3280 CGF.Builder.CreateStore(null, temp); 3281 } 3282 3283 llvm::BasicBlock *contBB = nullptr; 3284 llvm::BasicBlock *originBB = nullptr; 3285 3286 // If the address is *not* known to be non-null, we need to switch. 
3287 llvm::Value *finalArgument; 3288 3289 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3290 CGF.CGM.getDataLayout()); 3291 if (provablyNonNull) { 3292 finalArgument = temp.getPointer(); 3293 } else { 3294 llvm::Value *isNull = 3295 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3296 3297 finalArgument = CGF.Builder.CreateSelect(isNull, 3298 llvm::ConstantPointerNull::get(destType), 3299 temp.getPointer(), "icr.argument"); 3300 3301 // If we need to copy, then the load has to be conditional, which 3302 // means we need control flow. 3303 if (shouldCopy) { 3304 originBB = CGF.Builder.GetInsertBlock(); 3305 contBB = CGF.createBasicBlock("icr.cont"); 3306 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 3307 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 3308 CGF.EmitBlock(copyBB); 3309 condEval.begin(CGF); 3310 } 3311 } 3312 3313 llvm::Value *valueToUse = nullptr; 3314 3315 // Perform a copy if necessary. 3316 if (shouldCopy) { 3317 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 3318 assert(srcRV.isScalar()); 3319 3320 llvm::Value *src = srcRV.getScalarVal(); 3321 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 3322 "icr.cast"); 3323 3324 // Use an ordinary store, not a store-to-lvalue. 3325 CGF.Builder.CreateStore(src, temp); 3326 3327 // If optimization is enabled, and the value was held in a 3328 // __strong variable, we need to tell the optimizer that this 3329 // value has to stay alive until we're doing the store back. 3330 // This is because the temporary is effectively unretained, 3331 // and so otherwise we can violate the high-level semantics. 3332 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 3333 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 3334 valueToUse = src; 3335 } 3336 } 3337 3338 // Finish the control flow if we needed it. 3339 if (shouldCopy && !provablyNonNull) { 3340 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 3341 CGF.EmitBlock(contBB); 3342 3343 // Make a phi for the value to intrinsically use. 3344 if (valueToUse) { 3345 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 3346 "icr.to-use"); 3347 phiToUse->addIncoming(valueToUse, copyBB); 3348 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 3349 originBB); 3350 valueToUse = phiToUse; 3351 } 3352 3353 condEval.end(CGF); 3354 } 3355 3356 args.addWriteback(srcLV, temp, valueToUse); 3357 args.add(RValue::get(finalArgument), CRE->getType()); 3358 } 3359 3360 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 3361 assert(!StackBase); 3362 3363 // Save the stack. 3364 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 3365 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); 3366 } 3367 3368 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 3369 if (StackBase) { 3370 // Restore the stack after the call. 3371 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 3372 CGF.Builder.CreateCall(F, StackBase); 3373 } 3374 } 3375 3376 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, 3377 SourceLocation ArgLoc, 3378 AbstractCallee AC, 3379 unsigned ParmNum) { 3380 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || 3381 SanOpts.has(SanitizerKind::NullabilityArg))) 3382 return; 3383 3384 // The param decl may be missing in a variadic function. 3385 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr; 3386 unsigned ArgNo = PVD ? 
PVD->getFunctionScopeIndex() : ParmNum; 3387 3388 // Prefer the nonnull attribute if it's present. 3389 const NonNullAttr *NNAttr = nullptr; 3390 if (SanOpts.has(SanitizerKind::NonnullAttribute)) 3391 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo); 3392 3393 bool CanCheckNullability = false; 3394 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) { 3395 auto Nullability = PVD->getType()->getNullability(getContext()); 3396 CanCheckNullability = Nullability && 3397 *Nullability == NullabilityKind::NonNull && 3398 PVD->getTypeSourceInfo(); 3399 } 3400 3401 if (!NNAttr && !CanCheckNullability) 3402 return; 3403 3404 SourceLocation AttrLoc; 3405 SanitizerMask CheckKind; 3406 SanitizerHandler Handler; 3407 if (NNAttr) { 3408 AttrLoc = NNAttr->getLocation(); 3409 CheckKind = SanitizerKind::NonnullAttribute; 3410 Handler = SanitizerHandler::NonnullArg; 3411 } else { 3412 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc(); 3413 CheckKind = SanitizerKind::NullabilityArg; 3414 Handler = SanitizerHandler::NullabilityArg; 3415 } 3416 3417 SanitizerScope SanScope(this); 3418 assert(RV.isScalar()); 3419 llvm::Value *V = RV.getScalarVal(); 3420 llvm::Value *Cond = 3421 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType())); 3422 llvm::Constant *StaticData[] = { 3423 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc), 3424 llvm::ConstantInt::get(Int32Ty, ArgNo + 1), 3425 }; 3426 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None); 3427 } 3428 3429 void CodeGenFunction::EmitCallArgs( 3430 CallArgList &Args, ArrayRef<QualType> ArgTypes, 3431 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, 3432 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { 3433 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); 3434 3435 // We *have* to evaluate arguments from right to left in the MS C++ ABI, 3436 // because arguments are destroyed left to right in the callee. As a special 3437 // case, there are certain language constructs that require left-to-right 3438 // evaluation, and in those cases we consider the evaluation order requirement 3439 // to trump the "destruction order is reverse construction order" guarantee. 3440 bool LeftToRight = 3441 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee() 3442 ? Order == EvaluationOrder::ForceLeftToRight 3443 : Order != EvaluationOrder::ForceRightToLeft; 3444 3445 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg, 3446 RValue EmittedArg) { 3447 if (!AC.hasFunctionDecl() || I >= AC.getNumParams()) 3448 return; 3449 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>(); 3450 if (PS == nullptr) 3451 return; 3452 3453 const auto &Context = getContext(); 3454 auto SizeTy = Context.getSizeType(); 3455 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); 3456 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?"); 3457 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T, 3458 EmittedArg.getScalarVal(), 3459 /*IsDynamic=*/false); 3460 Args.add(RValue::get(V), SizeTy); 3461 // If we're emitting args in reverse, be sure to do so with 3462 // pass_object_size, as well. 3463 if (!LeftToRight) 3464 std::swap(Args.back(), *(&Args.back() - 1)); 3465 }; 3466 3467 // Insert a stack save if we're going to need any inalloca args. 
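  // The save pairs with the stack restore emitted after the call, so the
  // argument block's stack space is reclaimed. Schematically the emitted IR
  // looks roughly like:
  //
  //   %inalloca.save = call i8* @llvm.stacksave()
  //   %argmem = alloca inalloca <{ ... }>            ; created in EmitCall
  //   ...arguments stored into %argmem...
  //   call void @callee(<{ ... }>* inalloca %argmem)
  //   call void @llvm.stackrestore(i8* %inalloca.save)
  //
  // (CallArgList::allocateArgumentMemory emits the save,
  // CallArgList::freeArgumentMemory the restore.)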
3468   bool HasInAllocaArgs = false;
3469   if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3470     for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3471          I != E && !HasInAllocaArgs; ++I)
3472       HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3473     if (HasInAllocaArgs) {
3474       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3475       Args.allocateArgumentMemory(*this);
3476     }
3477   }
3478
3479   // Evaluate each argument in the appropriate order.
3480   size_t CallArgsStart = Args.size();
3481   for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3482     unsigned Idx = LeftToRight ? I : E - I - 1;
3483     CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3484     unsigned InitialArgSize = Args.size();
3485     // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3486     // the argument and parameter match or the objc method is parameterized.
3487     assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3488             getContext().hasSameUnqualifiedType((*Arg)->getType(),
3489                                                 ArgTypes[Idx]) ||
3490             (isa<ObjCMethodDecl>(AC.getDecl()) &&
3491              isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3492            "Argument and parameter types don't match");
3493     EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3494     // In particular, we depend on it being the last arg in Args, and the
3495     // objectsize bits depend on there only being one arg if !LeftToRight.
3496     assert(InitialArgSize + 1 == Args.size() &&
3497            "The code below depends on only adding one arg per EmitCallArg");
3498     (void)InitialArgSize;
3499     // Since pointer arguments are never emitted as LValues, it is safe to
3500     // emit the non-null argument check for r-values only.
3501     if (!Args.back().hasLValue()) {
3502       RValue RVArg = Args.back().getKnownRValue();
3503       EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3504                           ParamsToSkip + Idx);
3505       // @llvm.objectsize should never have side-effects and shouldn't need
3506       // destruction/cleanups, so we can safely "emit" it after its arg,
3507       // regardless of right-to-leftness.
3508       MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3509     }
3510   }
3511
3512   if (!LeftToRight) {
3513     // Un-reverse the arguments we just evaluated so they match up with the
3514     // LLVM IR function.
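    // For example (a sketch):
    //
    //   void f(S a, S b);
    //   f(g(), h());
    //
    // Under the Microsoft C++ ABI the loop above evaluated h() and then g(),
    // so Args currently holds [h-result, g-result]; the reverse restores the
    // [a, b] order that the LLVM IR signature expects.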
3515 std::reverse(Args.begin() + CallArgsStart, Args.end()); 3516 } 3517 } 3518 3519 namespace { 3520 3521 struct DestroyUnpassedArg final : EHScopeStack::Cleanup { 3522 DestroyUnpassedArg(Address Addr, QualType Ty) 3523 : Addr(Addr), Ty(Ty) {} 3524 3525 Address Addr; 3526 QualType Ty; 3527 3528 void Emit(CodeGenFunction &CGF, Flags flags) override { 3529 QualType::DestructionKind DtorKind = Ty.isDestructedType(); 3530 if (DtorKind == QualType::DK_cxx_destructor) { 3531 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 3532 assert(!Dtor->isTrivial()); 3533 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 3534 /*Delegating=*/false, Addr); 3535 } else { 3536 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty)); 3537 } 3538 } 3539 }; 3540 3541 struct DisableDebugLocationUpdates { 3542 CodeGenFunction &CGF; 3543 bool disabledDebugInfo; 3544 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { 3545 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) 3546 CGF.disableDebugInfo(); 3547 } 3548 ~DisableDebugLocationUpdates() { 3549 if (disabledDebugInfo) 3550 CGF.enableDebugInfo(); 3551 } 3552 }; 3553 3554 } // end anonymous namespace 3555 3556 RValue CallArg::getRValue(CodeGenFunction &CGF) const { 3557 if (!HasLV) 3558 return RV; 3559 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty); 3560 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, 3561 LV.isVolatile()); 3562 IsUsed = true; 3563 return RValue::getAggregate(Copy.getAddress()); 3564 } 3565 3566 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const { 3567 LValue Dst = CGF.MakeAddrLValue(Addr, Ty); 3568 if (!HasLV && RV.isScalar()) 3569 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true); 3570 else if (!HasLV && RV.isComplex()) 3571 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true); 3572 else { 3573 auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress(); 3574 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty); 3575 // We assume that call args are never copied into subobjects. 3576 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap, 3577 HasLV ? LV.isVolatileQualified() 3578 : RV.isVolatileQualified()); 3579 } 3580 IsUsed = true; 3581 } 3582 3583 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 3584 QualType type) { 3585 DisableDebugLocationUpdates Dis(*this, E); 3586 if (const ObjCIndirectCopyRestoreExpr *CRE 3587 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 3588 assert(getLangOpts().ObjCAutoRefCount); 3589 return emitWritebackArg(*this, args, CRE); 3590 } 3591 3592 assert(type->isReferenceType() == E->isGLValue() && 3593 "reference binding to unmaterialized r-value!"); 3594 3595 if (E->isGLValue()) { 3596 assert(E->getObjectKind() == OK_Ordinary); 3597 return args.add(EmitReferenceBindingToExpr(E), type); 3598 } 3599 3600 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 3601 3602 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 3603 // However, we still have to push an EH-only cleanup in case we unwind before 3604 // we make it to the call. 3605 if (HasAggregateEvalKind && 3606 type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) { 3607 // If we're using inalloca, use the argument memory. Otherwise, use a 3608 // temporary. 
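    // A sketch of the case being handled (names are illustrative):
    //
    //   struct S { S(); S(const S &); ~S(); };
    //   void callee(S s);
    //   void caller() { callee(S()); }
    //
    // In the Microsoft C++ ABI 'callee' destroys 's', so the caller emits no
    // destructor for the temporary on the normal path; the EH-only cleanup
    // pushed below fires only if something throws after the temporary is
    // constructed but before the call takes ownership of it.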
3609 AggValueSlot Slot; 3610 if (args.isUsingInAlloca()) 3611 Slot = createPlaceholderSlot(*this, type); 3612 else 3613 Slot = CreateAggTemp(type, "agg.tmp"); 3614 3615 bool DestroyedInCallee = true, NeedsEHCleanup = true; 3616 if (const auto *RD = type->getAsCXXRecordDecl()) 3617 DestroyedInCallee = RD->hasNonTrivialDestructor(); 3618 else 3619 NeedsEHCleanup = needsEHCleanup(type.isDestructedType()); 3620 3621 if (DestroyedInCallee) 3622 Slot.setExternallyDestructed(); 3623 3624 EmitAggExpr(E, Slot); 3625 RValue RV = Slot.asRValue(); 3626 args.add(RV, type); 3627 3628 if (DestroyedInCallee && NeedsEHCleanup) { 3629 // Create a no-op GEP between the placeholder and the cleanup so we can 3630 // RAUW it successfully. It also serves as a marker of the first 3631 // instruction where the cleanup is active. 3632 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(), 3633 type); 3634 // This unreachable is a temporary marker which will be removed later. 3635 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 3636 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 3637 } 3638 return; 3639 } 3640 3641 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 3642 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 3643 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 3644 assert(L.isSimple()); 3645 args.addUncopiedAggregate(L, type); 3646 return; 3647 } 3648 3649 args.add(EmitAnyExprToTemp(E), type); 3650 } 3651 3652 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 3653 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 3654 // implicitly widens null pointer constants that are arguments to varargs 3655 // functions to pointer-sized ints. 3656 if (!getTarget().getTriple().isOSWindows()) 3657 return Arg->getType(); 3658 3659 if (Arg->getType()->isIntegerType() && 3660 getContext().getTypeSize(Arg->getType()) < 3661 getContext().getTargetInfo().getPointerWidth(0) && 3662 Arg->isNullPointerConstant(getContext(), 3663 Expr::NPC_ValueDependentIsNotNull)) { 3664 return getContext().getIntPtrType(); 3665 } 3666 3667 return Arg->getType(); 3668 } 3669 3670 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3671 // optimizer it can aggressively ignore unwind edges. 3672 void 3673 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 3674 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 3675 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 3676 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 3677 CGM.getNoObjCARCExceptionsMetadata()); 3678 } 3679 3680 /// Emits a call to the given no-arguments nounwind runtime function. 3681 llvm::CallInst * 3682 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 3683 const llvm::Twine &name) { 3684 return EmitNounwindRuntimeCall(callee, None, name); 3685 } 3686 3687 /// Emits a call to the given nounwind runtime function. 3688 llvm::CallInst * 3689 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 3690 ArrayRef<llvm::Value*> args, 3691 const llvm::Twine &name) { 3692 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 3693 call->setDoesNotThrow(); 3694 return call; 3695 } 3696 3697 /// Emits a simple call (never an invoke) to the given no-arguments 3698 /// runtime function. 
3699 llvm::CallInst * 3700 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 3701 const llvm::Twine &name) { 3702 return EmitRuntimeCall(callee, None, name); 3703 } 3704 3705 // Calls which may throw must have operand bundles indicating which funclet 3706 // they are nested within. 3707 SmallVector<llvm::OperandBundleDef, 1> 3708 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) { 3709 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3710 // There is no need for a funclet operand bundle if we aren't inside a 3711 // funclet. 3712 if (!CurrentFuncletPad) 3713 return BundleList; 3714 3715 // Skip intrinsics which cannot throw. 3716 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts()); 3717 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) 3718 return BundleList; 3719 3720 BundleList.emplace_back("funclet", CurrentFuncletPad); 3721 return BundleList; 3722 } 3723 3724 /// Emits a simple call (never an invoke) to the given runtime function. 3725 llvm::CallInst * 3726 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 3727 ArrayRef<llvm::Value*> args, 3728 const llvm::Twine &name) { 3729 llvm::CallInst *call = 3730 Builder.CreateCall(callee, args, getBundlesForFunclet(callee), name); 3731 call->setCallingConv(getRuntimeCC()); 3732 return call; 3733 } 3734 3735 /// Emits a call or invoke to the given noreturn runtime function. 3736 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, 3737 ArrayRef<llvm::Value*> args) { 3738 SmallVector<llvm::OperandBundleDef, 1> BundleList = 3739 getBundlesForFunclet(callee); 3740 3741 if (getInvokeDest()) { 3742 llvm::InvokeInst *invoke = 3743 Builder.CreateInvoke(callee, 3744 getUnreachableBlock(), 3745 getInvokeDest(), 3746 args, 3747 BundleList); 3748 invoke->setDoesNotReturn(); 3749 invoke->setCallingConv(getRuntimeCC()); 3750 } else { 3751 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); 3752 call->setDoesNotReturn(); 3753 call->setCallingConv(getRuntimeCC()); 3754 Builder.CreateUnreachable(); 3755 } 3756 } 3757 3758 /// Emits a call or invoke instruction to the given nullary runtime function. 3759 llvm::CallBase *CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 3760 const Twine &name) { 3761 return EmitRuntimeCallOrInvoke(callee, None, name); 3762 } 3763 3764 /// Emits a call or invoke instruction to the given runtime function. 3765 llvm::CallBase *CodeGenFunction::EmitRuntimeCallOrInvoke( 3766 llvm::Value *callee, ArrayRef<llvm::Value *> args, const Twine &name) { 3767 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name); 3768 call->setCallingConv(getRuntimeCC()); 3769 return call; 3770 } 3771 3772 /// Emits a call or invoke instruction to the given function, depending 3773 /// on the current state of the EH stack. 3774 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 3775 ArrayRef<llvm::Value *> Args, 3776 const Twine &Name) { 3777 llvm::BasicBlock *InvokeDest = getInvokeDest(); 3778 SmallVector<llvm::OperandBundleDef, 1> BundleList = 3779 getBundlesForFunclet(Callee); 3780 3781 llvm::CallBase *Inst; 3782 if (!InvokeDest) 3783 Inst = Builder.CreateCall(Callee, Args, BundleList, Name); 3784 else { 3785 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 3786 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList, 3787 Name); 3788 EmitBlock(ContBB); 3789 } 3790 3791 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3792 // optimizer it can aggressively ignore unwind edges. 
3793 if (CGM.getLangOpts().ObjCAutoRefCount) 3794 AddObjCARCExceptionMetadata(Inst); 3795 3796 return Inst; 3797 } 3798 3799 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, 3800 llvm::Value *New) { 3801 DeferredReplacements.push_back(std::make_pair(Old, New)); 3802 } 3803 3804 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 3805 const CGCallee &Callee, 3806 ReturnValueSlot ReturnValue, 3807 const CallArgList &CallArgs, 3808 llvm::CallBase **callOrInvoke, 3809 SourceLocation Loc) { 3810 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 3811 3812 assert(Callee.isOrdinary() || Callee.isVirtual()); 3813 3814 // Handle struct-return functions by passing a pointer to the 3815 // location that we would like to return into. 3816 QualType RetTy = CallInfo.getReturnType(); 3817 const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); 3818 3819 llvm::FunctionType *IRFuncTy = Callee.getFunctionType(); 3820 3821 // 1. Set up the arguments. 3822 3823 // If we're using inalloca, insert the allocation after the stack save. 3824 // FIXME: Do this earlier rather than hacking it in here! 3825 Address ArgMemory = Address::invalid(); 3826 const llvm::StructLayout *ArgMemoryLayout = nullptr; 3827 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { 3828 const llvm::DataLayout &DL = CGM.getDataLayout(); 3829 ArgMemoryLayout = DL.getStructLayout(ArgStruct); 3830 llvm::Instruction *IP = CallArgs.getStackBase(); 3831 llvm::AllocaInst *AI; 3832 if (IP) { 3833 IP = IP->getNextNode(); 3834 AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(), 3835 "argmem", IP); 3836 } else { 3837 AI = CreateTempAlloca(ArgStruct, "argmem"); 3838 } 3839 auto Align = CallInfo.getArgStructAlignment(); 3840 AI->setAlignment(Align.getQuantity()); 3841 AI->setUsedWithInAlloca(true); 3842 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); 3843 ArgMemory = Address(AI, Align); 3844 } 3845 3846 // Helper function to drill into the inalloca allocation. 3847 auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address { 3848 auto FieldOffset = 3849 CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex)); 3850 return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset); 3851 }; 3852 3853 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo); 3854 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs()); 3855 3856 // If the call returns a temporary with struct return, create a temporary 3857 // alloca to hold the result, unless one is given to us. 
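  // For example (a sketch, names are illustrative):
  //
  //   struct Big { int a[16]; };
  //   Big make();
  //   Big b = make();
  //
  // is lowered roughly as 'void make(%struct.Big* sret)', and the storage for
  // 'b' (the ReturnValueSlot) can be handed over directly; the temporary
  // below is only needed when no destination was provided or the result is
  // unused.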
3858 Address SRetPtr = Address::invalid(); 3859 Address SRetAlloca = Address::invalid(); 3860 llvm::Value *UnusedReturnSizePtr = nullptr; 3861 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) { 3862 if (!ReturnValue.isNull()) { 3863 SRetPtr = ReturnValue.getValue(); 3864 } else { 3865 SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca); 3866 if (HaveInsertPoint() && ReturnValue.isUnused()) { 3867 uint64_t size = 3868 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy)); 3869 UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer()); 3870 } 3871 } 3872 if (IRFunctionArgs.hasSRetArg()) { 3873 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer(); 3874 } else if (RetAI.isInAlloca()) { 3875 Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex()); 3876 Builder.CreateStore(SRetPtr.getPointer(), Addr); 3877 } 3878 } 3879 3880 Address swiftErrorTemp = Address::invalid(); 3881 Address swiftErrorArg = Address::invalid(); 3882 3883 // Translate all of the arguments as necessary to match the IR lowering. 3884 assert(CallInfo.arg_size() == CallArgs.size() && 3885 "Mismatch between function signature & arguments."); 3886 unsigned ArgNo = 0; 3887 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); 3888 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); 3889 I != E; ++I, ++info_it, ++ArgNo) { 3890 const ABIArgInfo &ArgInfo = info_it->info; 3891 3892 // Insert a padding argument to ensure proper alignment. 3893 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 3894 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 3895 llvm::UndefValue::get(ArgInfo.getPaddingType()); 3896 3897 unsigned FirstIRArg, NumIRArgs; 3898 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 3899 3900 switch (ArgInfo.getKind()) { 3901 case ABIArgInfo::InAlloca: { 3902 assert(NumIRArgs == 0); 3903 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 3904 if (I->isAggregate()) { 3905 // Replace the placeholder with the appropriate argument slot GEP. 3906 Address Addr = I->hasLValue() 3907 ? I->getKnownLValue().getAddress() 3908 : I->getKnownRValue().getAggregateAddress(); 3909 llvm::Instruction *Placeholder = 3910 cast<llvm::Instruction>(Addr.getPointer()); 3911 CGBuilderTy::InsertPoint IP = Builder.saveIP(); 3912 Builder.SetInsertPoint(Placeholder); 3913 Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex()); 3914 Builder.restoreIP(IP); 3915 deferPlaceholderReplacement(Placeholder, Addr.getPointer()); 3916 } else { 3917 // Store the RValue into the argument struct. 3918 Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex()); 3919 unsigned AS = Addr.getType()->getPointerAddressSpace(); 3920 llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS); 3921 // There are some cases where a trivial bitcast is not avoidable. The 3922 // definition of a type later in a translation unit may change it's type 3923 // from {}* to (%struct.foo*)*. 3924 if (Addr.getType() != MemType) 3925 Addr = Builder.CreateBitCast(Addr, MemType); 3926 I->copyInto(*this, Addr); 3927 } 3928 break; 3929 } 3930 3931 case ABIArgInfo::Indirect: { 3932 assert(NumIRArgs == 1); 3933 if (!I->isAggregate()) { 3934 // Make a temporary alloca to pass the argument. 
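      // E.g. (a sketch): a scalar the target ABI passes by reference becomes
      // roughly
      //
      //   %indirect-arg-temp = alloca <scalar type>
      //   store <scalar type> %val, <scalar type>* %indirect-arg-temp
      //   call void @callee(<scalar type>* %indirect-arg-temp)
      //
      // The aggregate path below instead tries to pass the address of the
      // existing object and copies only when alignment or address-space
      // constraints force it.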
3935 Address Addr = CreateMemTempWithoutCast( 3936 I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp"); 3937 IRCallArgs[FirstIRArg] = Addr.getPointer(); 3938 3939 I->copyInto(*this, Addr); 3940 } else { 3941 // We want to avoid creating an unnecessary temporary+copy here; 3942 // however, we need one in three cases: 3943 // 1. If the argument is not byval, and we are required to copy the 3944 // source. (This case doesn't occur on any common architecture.) 3945 // 2. If the argument is byval, RV is not sufficiently aligned, and 3946 // we cannot force it to be sufficiently aligned. 3947 // 3. If the argument is byval, but RV is not located in default 3948 // or alloca address space. 3949 Address Addr = I->hasLValue() 3950 ? I->getKnownLValue().getAddress() 3951 : I->getKnownRValue().getAggregateAddress(); 3952 llvm::Value *V = Addr.getPointer(); 3953 CharUnits Align = ArgInfo.getIndirectAlign(); 3954 const llvm::DataLayout *TD = &CGM.getDataLayout(); 3955 3956 assert((FirstIRArg >= IRFuncTy->getNumParams() || 3957 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() == 3958 TD->getAllocaAddrSpace()) && 3959 "indirect argument must be in alloca address space"); 3960 3961 bool NeedCopy = false; 3962 3963 if (Addr.getAlignment() < Align && 3964 llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) < 3965 Align.getQuantity()) { 3966 NeedCopy = true; 3967 } else if (I->hasLValue()) { 3968 auto LV = I->getKnownLValue(); 3969 auto AS = LV.getAddressSpace(); 3970 3971 if ((!ArgInfo.getIndirectByVal() && 3972 (LV.getAlignment() >= 3973 getContext().getTypeAlignInChars(I->Ty)))) { 3974 NeedCopy = true; 3975 } 3976 if (!getLangOpts().OpenCL) { 3977 if ((ArgInfo.getIndirectByVal() && 3978 (AS != LangAS::Default && 3979 AS != CGM.getASTAllocaAddressSpace()))) { 3980 NeedCopy = true; 3981 } 3982 } 3983 // For OpenCL even if RV is located in default or alloca address space 3984 // we don't want to perform address space cast for it. 3985 else if ((ArgInfo.getIndirectByVal() && 3986 Addr.getType()->getAddressSpace() != IRFuncTy-> 3987 getParamType(FirstIRArg)->getPointerAddressSpace())) { 3988 NeedCopy = true; 3989 } 3990 } 3991 3992 if (NeedCopy) { 3993 // Create an aligned temporary, and copy to it. 3994 Address AI = CreateMemTempWithoutCast( 3995 I->Ty, ArgInfo.getIndirectAlign(), "byval-temp"); 3996 IRCallArgs[FirstIRArg] = AI.getPointer(); 3997 I->copyInto(*this, AI); 3998 } else { 3999 // Skip the extra memcpy call. 4000 auto *T = V->getType()->getPointerElementType()->getPointerTo( 4001 CGM.getDataLayout().getAllocaAddrSpace()); 4002 IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast( 4003 *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T, 4004 true); 4005 } 4006 } 4007 break; 4008 } 4009 4010 case ABIArgInfo::Ignore: 4011 assert(NumIRArgs == 0); 4012 break; 4013 4014 case ABIArgInfo::Extend: 4015 case ABIArgInfo::Direct: { 4016 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) && 4017 ArgInfo.getCoerceToType() == ConvertType(info_it->type) && 4018 ArgInfo.getDirectOffset() == 0) { 4019 assert(NumIRArgs == 1); 4020 llvm::Value *V; 4021 if (!I->isAggregate()) 4022 V = I->getKnownRValue().getScalarVal(); 4023 else 4024 V = Builder.CreateLoad( 4025 I->hasLValue() ? I->getKnownLValue().getAddress() 4026 : I->getKnownRValue().getAggregateAddress()); 4027 4028 // Implement swifterror by copying into a new swifterror argument. 4029 // We'll write back in the normal path out of the call. 
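      // A sketch of the sequence (not exact IR; the error type name is
      // illustrative):
      //
      //   %swifterror.temp = alloca %swift.error*      ; setSwiftError(true)
      //   %cur = load %swift.error*, %swift.error** %caller.slot
      //   store %swift.error* %cur, %swift.error** %swifterror.temp
      //   call ... (%swift.error** swifterror %swifterror.temp) ...
      //
      // After the call, the value left in %swifterror.temp is stored back
      // into %caller.slot (see "Perform the swifterror writeback" below).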
4030 if (CallInfo.getExtParameterInfo(ArgNo).getABI() 4031 == ParameterABI::SwiftErrorResult) { 4032 assert(!swiftErrorTemp.isValid() && "multiple swifterror args"); 4033 4034 QualType pointeeTy = I->Ty->getPointeeType(); 4035 swiftErrorArg = 4036 Address(V, getContext().getTypeAlignInChars(pointeeTy)); 4037 4038 swiftErrorTemp = 4039 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 4040 V = swiftErrorTemp.getPointer(); 4041 cast<llvm::AllocaInst>(V)->setSwiftError(true); 4042 4043 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg); 4044 Builder.CreateStore(errorValue, swiftErrorTemp); 4045 } 4046 4047 // We might have to widen integers, but we should never truncate. 4048 if (ArgInfo.getCoerceToType() != V->getType() && 4049 V->getType()->isIntegerTy()) 4050 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType()); 4051 4052 // If the argument doesn't match, perform a bitcast to coerce it. This 4053 // can happen due to trivial type mismatches. 4054 if (FirstIRArg < IRFuncTy->getNumParams() && 4055 V->getType() != IRFuncTy->getParamType(FirstIRArg)) 4056 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); 4057 4058 IRCallArgs[FirstIRArg] = V; 4059 break; 4060 } 4061 4062 // FIXME: Avoid the conversion through memory if possible. 4063 Address Src = Address::invalid(); 4064 if (!I->isAggregate()) { 4065 Src = CreateMemTemp(I->Ty, "coerce"); 4066 I->copyInto(*this, Src); 4067 } else { 4068 Src = I->hasLValue() ? I->getKnownLValue().getAddress() 4069 : I->getKnownRValue().getAggregateAddress(); 4070 } 4071 4072 // If the value is offset in memory, apply the offset now. 4073 Src = emitAddressAtOffset(*this, Src, ArgInfo); 4074 4075 // Fast-isel and the optimizer generally like scalar values better than 4076 // FCAs, so we flatten them if this is safe to do for this argument. 4077 llvm::StructType *STy = 4078 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType()); 4079 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 4080 llvm::Type *SrcTy = Src.getType()->getElementType(); 4081 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); 4082 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); 4083 4084 // If the source type is smaller than the destination type of the 4085 // coerce-to logic, copy the source value into a temp alloca the size 4086 // of the destination type to allow loading all of it. The bits past 4087 // the source value are left undef. 4088 if (SrcSize < DstSize) { 4089 Address TempAlloca 4090 = CreateTempAlloca(STy, Src.getAlignment(), 4091 Src.getName() + ".coerce"); 4092 Builder.CreateMemCpy(TempAlloca, Src, SrcSize); 4093 Src = TempAlloca; 4094 } else { 4095 Src = Builder.CreateBitCast(Src, 4096 STy->getPointerTo(Src.getAddressSpace())); 4097 } 4098 4099 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy); 4100 assert(NumIRArgs == STy->getNumElements()); 4101 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 4102 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i)); 4103 Address EltPtr = Builder.CreateStructGEP(Src, i, Offset); 4104 llvm::Value *LI = Builder.CreateLoad(EltPtr); 4105 IRCallArgs[FirstIRArg + i] = LI; 4106 } 4107 } else { 4108 // In the simple case, just pass the coerced loaded value. 
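      // For example (a sketch): a struct of two floats whose coercion type is
      // a single <2 x float> is not flattened; the temporary's bytes are
      // simply reloaded as that one coerced type:
      //
      //   %coerce = load <2 x float>, <2 x float>* %tmp
      //   call void @callee(<2 x float> %coerce)
      //
      // whereas the flattened path above passes each element of a struct-typed
      // coercion type as its own IR argument.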
4109 assert(NumIRArgs == 1); 4110 IRCallArgs[FirstIRArg] = 4111 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this); 4112 } 4113 4114 break; 4115 } 4116 4117 case ABIArgInfo::CoerceAndExpand: { 4118 auto coercionType = ArgInfo.getCoerceAndExpandType(); 4119 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 4120 4121 llvm::Value *tempSize = nullptr; 4122 Address addr = Address::invalid(); 4123 Address AllocaAddr = Address::invalid(); 4124 if (I->isAggregate()) { 4125 addr = I->hasLValue() ? I->getKnownLValue().getAddress() 4126 : I->getKnownRValue().getAggregateAddress(); 4127 4128 } else { 4129 RValue RV = I->getKnownRValue(); 4130 assert(RV.isScalar()); // complex should always just be direct 4131 4132 llvm::Type *scalarType = RV.getScalarVal()->getType(); 4133 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType); 4134 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType); 4135 4136 // Materialize to a temporary. 4137 addr = CreateTempAlloca(RV.getScalarVal()->getType(), 4138 CharUnits::fromQuantity(std::max( 4139 layout->getAlignment(), scalarAlign)), 4140 "tmp", 4141 /*ArraySize=*/nullptr, &AllocaAddr); 4142 tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer()); 4143 4144 Builder.CreateStore(RV.getScalarVal(), addr); 4145 } 4146 4147 addr = Builder.CreateElementBitCast(addr, coercionType); 4148 4149 unsigned IRArgPos = FirstIRArg; 4150 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 4151 llvm::Type *eltType = coercionType->getElementType(i); 4152 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; 4153 Address eltAddr = Builder.CreateStructGEP(addr, i, layout); 4154 llvm::Value *elt = Builder.CreateLoad(eltAddr); 4155 IRCallArgs[IRArgPos++] = elt; 4156 } 4157 assert(IRArgPos == FirstIRArg + NumIRArgs); 4158 4159 if (tempSize) { 4160 EmitLifetimeEnd(tempSize, AllocaAddr.getPointer()); 4161 } 4162 4163 break; 4164 } 4165 4166 case ABIArgInfo::Expand: 4167 unsigned IRArgPos = FirstIRArg; 4168 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos); 4169 assert(IRArgPos == FirstIRArg + NumIRArgs); 4170 break; 4171 } 4172 } 4173 4174 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this); 4175 llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer(); 4176 4177 // If we're using inalloca, set up that argument. 4178 if (ArgMemory.isValid()) { 4179 llvm::Value *Arg = ArgMemory.getPointer(); 4180 if (CallInfo.isVariadic()) { 4181 // When passing non-POD arguments by value to variadic functions, we will 4182 // end up with a variadic prototype and an inalloca call site. In such 4183 // cases, we can't do any parameter mismatch checks. Give up and bitcast 4184 // the callee. 4185 unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace(); 4186 auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS); 4187 CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy); 4188 } else { 4189 llvm::Type *LastParamTy = 4190 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1); 4191 if (Arg->getType() != LastParamTy) { 4192 #ifndef NDEBUG 4193 // Assert that these structs have equivalent element types. 
4194 llvm::StructType *FullTy = CallInfo.getArgStruct(); 4195 llvm::StructType *DeclaredTy = cast<llvm::StructType>( 4196 cast<llvm::PointerType>(LastParamTy)->getElementType()); 4197 assert(DeclaredTy->getNumElements() == FullTy->getNumElements()); 4198 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(), 4199 DE = DeclaredTy->element_end(), 4200 FI = FullTy->element_begin(); 4201 DI != DE; ++DI, ++FI) 4202 assert(*DI == *FI); 4203 #endif 4204 Arg = Builder.CreateBitCast(Arg, LastParamTy); 4205 } 4206 } 4207 assert(IRFunctionArgs.hasInallocaArg()); 4208 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg; 4209 } 4210 4211 // 2. Prepare the function pointer. 4212 4213 // If the callee is a bitcast of a non-variadic function to have a 4214 // variadic function pointer type, check to see if we can remove the 4215 // bitcast. This comes up with unprototyped functions. 4216 // 4217 // This makes the IR nicer, but more importantly it ensures that we 4218 // can inline the function at -O0 if it is marked always_inline. 4219 auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* { 4220 llvm::FunctionType *CalleeFT = 4221 cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType()); 4222 if (!CalleeFT->isVarArg()) 4223 return Ptr; 4224 4225 llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr); 4226 if (!CE || CE->getOpcode() != llvm::Instruction::BitCast) 4227 return Ptr; 4228 4229 llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0)); 4230 if (!OrigFn) 4231 return Ptr; 4232 4233 llvm::FunctionType *OrigFT = OrigFn->getFunctionType(); 4234 4235 // If the original type is variadic, or if any of the component types 4236 // disagree, we cannot remove the cast. 4237 if (OrigFT->isVarArg() || 4238 OrigFT->getNumParams() != CalleeFT->getNumParams() || 4239 OrigFT->getReturnType() != CalleeFT->getReturnType()) 4240 return Ptr; 4241 4242 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i) 4243 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i)) 4244 return Ptr; 4245 4246 return OrigFn; 4247 }; 4248 CalleePtr = simplifyVariadicCallee(CalleePtr); 4249 4250 // 3. Perform the actual call. 4251 4252 // Deactivate any cleanups that we're supposed to do immediately before 4253 // the call. 4254 if (!CallArgs.getCleanupsToDeactivate().empty()) 4255 deactivateArgCleanupsBeforeCall(*this, CallArgs); 4256 4257 // Assert that the arguments we computed match up. The IR verifier 4258 // will catch this, but this is a common enough source of problems 4259 // during IRGen changes that it's way better for debugging to catch 4260 // it ourselves here. 4261 #ifndef NDEBUG 4262 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg()); 4263 for (unsigned i = 0; i < IRCallArgs.size(); ++i) { 4264 // Inalloca argument can have different type. 4265 if (IRFunctionArgs.hasInallocaArg() && 4266 i == IRFunctionArgs.getInallocaArgNo()) 4267 continue; 4268 if (i < IRFuncTy->getNumParams()) 4269 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i)); 4270 } 4271 #endif 4272 4273 // Update the largest vector width if any arguments have vector types. 4274 for (unsigned i = 0; i < IRCallArgs.size(); ++i) { 4275 if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType())) 4276 LargestVectorWidth = std::max(LargestVectorWidth, 4277 VT->getPrimitiveSizeInBits()); 4278 } 4279 4280 // Compute the calling convention and attributes. 
4281 unsigned CallingConv; 4282 llvm::AttributeList Attrs; 4283 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo, 4284 Callee.getAbstractInfo(), Attrs, CallingConv, 4285 /*AttrOnCallSite=*/true); 4286 4287 // Apply some call-site-specific attributes. 4288 // TODO: work this into building the attribute set. 4289 4290 // Apply always_inline to all calls within flatten functions. 4291 // FIXME: should this really take priority over __try, below? 4292 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() && 4293 !(Callee.getAbstractInfo().getCalleeDecl().getDecl() && 4294 Callee.getAbstractInfo() 4295 .getCalleeDecl() 4296 .getDecl() 4297 ->hasAttr<NoInlineAttr>())) { 4298 Attrs = 4299 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex, 4300 llvm::Attribute::AlwaysInline); 4301 } 4302 4303 // Disable inlining inside SEH __try blocks. 4304 if (isSEHTryScope()) { 4305 Attrs = 4306 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex, 4307 llvm::Attribute::NoInline); 4308 } 4309 4310 // Decide whether to use a call or an invoke. 4311 bool CannotThrow; 4312 if (currentFunctionUsesSEHTry()) { 4313 // SEH cares about asynchronous exceptions, so everything can "throw." 4314 CannotThrow = false; 4315 } else if (isCleanupPadScope() && 4316 EHPersonality::get(*this).isMSVCXXPersonality()) { 4317 // The MSVC++ personality will implicitly terminate the program if an 4318 // exception is thrown during a cleanup outside of a try/catch. 4319 // We don't need to model anything in IR to get this behavior. 4320 CannotThrow = true; 4321 } else { 4322 // Otherwise, nounwind call sites will never throw. 4323 CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex, 4324 llvm::Attribute::NoUnwind); 4325 } 4326 4327 // If we made a temporary, be sure to clean up after ourselves. Note that we 4328 // can't depend on being inside of an ExprWithCleanups, so we need to manually 4329 // pop this cleanup later on. Being eager about this is OK, since this 4330 // temporary is 'invisible' outside of the callee. 4331 if (UnusedReturnSizePtr) 4332 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca, 4333 UnusedReturnSizePtr); 4334 4335 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest(); 4336 4337 SmallVector<llvm::OperandBundleDef, 1> BundleList = 4338 getBundlesForFunclet(CalleePtr); 4339 4340 // Emit the actual call/invoke instruction. 4341 llvm::CallBase *CI; 4342 if (!InvokeDest) { 4343 CI = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList); 4344 } else { 4345 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); 4346 CI = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs, 4347 BundleList); 4348 EmitBlock(Cont); 4349 } 4350 if (callOrInvoke) 4351 *callOrInvoke = CI; 4352 4353 // Apply the attributes and calling convention. 4354 CI->setAttributes(Attrs); 4355 CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); 4356 4357 // Apply various metadata. 4358 4359 if (!CI->getType()->isVoidTy()) 4360 CI->setName("call"); 4361 4362 // Update largest vector width from the return type. 4363 if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType())) 4364 LargestVectorWidth = std::max(LargestVectorWidth, 4365 VT->getPrimitiveSizeInBits()); 4366 4367 // Insert instrumentation or attach profile metadata at indirect call sites. 4368 // For more details, see the comment before the definition of 4369 // IPVK_IndirectCallTarget in InstrProfData.inc. 
4370 if (!CI->getCalledFunction()) 4371 PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget, 4372 CI, CalleePtr); 4373 4374 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 4375 // optimizer it can aggressively ignore unwind edges. 4376 if (CGM.getLangOpts().ObjCAutoRefCount) 4377 AddObjCARCExceptionMetadata(CI); 4378 4379 // Suppress tail calls if requested. 4380 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) { 4381 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); 4382 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>()) 4383 Call->setTailCallKind(llvm::CallInst::TCK_NoTail); 4384 } 4385 4386 // 4. Finish the call. 4387 4388 // If the call doesn't return, finish the basic block and clear the 4389 // insertion point; this allows the rest of IRGen to discard 4390 // unreachable code. 4391 if (CI->doesNotReturn()) { 4392 if (UnusedReturnSizePtr) 4393 PopCleanupBlock(); 4394 4395 // Strip away the noreturn attribute to better diagnose unreachable UB. 4396 if (SanOpts.has(SanitizerKind::Unreachable)) { 4397 // Also remove from function since CallBase::hasFnAttr additionally checks 4398 // attributes of the called function. 4399 if (auto *F = CI->getCalledFunction()) 4400 F->removeFnAttr(llvm::Attribute::NoReturn); 4401 CI->removeAttribute(llvm::AttributeList::FunctionIndex, 4402 llvm::Attribute::NoReturn); 4403 4404 // Avoid incompatibility with ASan which relies on the `noreturn` 4405 // attribute to insert handler calls. 4406 if (SanOpts.has(SanitizerKind::Address)) { 4407 SanitizerScope SanScope(this); 4408 llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder); 4409 Builder.SetInsertPoint(CI); 4410 auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); 4411 auto *Fn = CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return"); 4412 EmitNounwindRuntimeCall(Fn); 4413 } 4414 } 4415 4416 EmitUnreachable(Loc); 4417 Builder.ClearInsertionPoint(); 4418 4419 // FIXME: For now, emit a dummy basic block because expr emitters in 4420 // generally are not ready to handle emitting expressions at unreachable 4421 // points. 4422 EnsureInsertPoint(); 4423 4424 // Return a reasonable RValue. 4425 return GetUndefRValue(RetTy); 4426 } 4427 4428 // Perform the swifterror writeback. 4429 if (swiftErrorTemp.isValid()) { 4430 llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp); 4431 Builder.CreateStore(errorResult, swiftErrorArg); 4432 } 4433 4434 // Emit any call-associated writebacks immediately. Arguably this 4435 // should happen after any return-value munging. 4436 if (CallArgs.hasWritebacks()) 4437 emitWritebacks(*this, CallArgs); 4438 4439 // The stack cleanup for inalloca arguments has to run out of the normal 4440 // lexical order, so deactivate it and run it manually here. 4441 CallArgs.freeArgumentMemory(*this); 4442 4443 // Extract the return value. 
4444 RValue Ret = [&] { 4445 switch (RetAI.getKind()) { 4446 case ABIArgInfo::CoerceAndExpand: { 4447 auto coercionType = RetAI.getCoerceAndExpandType(); 4448 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 4449 4450 Address addr = SRetPtr; 4451 addr = Builder.CreateElementBitCast(addr, coercionType); 4452 4453 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType()); 4454 bool requiresExtract = isa<llvm::StructType>(CI->getType()); 4455 4456 unsigned unpaddedIndex = 0; 4457 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 4458 llvm::Type *eltType = coercionType->getElementType(i); 4459 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; 4460 Address eltAddr = Builder.CreateStructGEP(addr, i, layout); 4461 llvm::Value *elt = CI; 4462 if (requiresExtract) 4463 elt = Builder.CreateExtractValue(elt, unpaddedIndex++); 4464 else 4465 assert(unpaddedIndex == 0); 4466 Builder.CreateStore(elt, eltAddr); 4467 } 4468 // FALLTHROUGH 4469 LLVM_FALLTHROUGH; 4470 } 4471 4472 case ABIArgInfo::InAlloca: 4473 case ABIArgInfo::Indirect: { 4474 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation()); 4475 if (UnusedReturnSizePtr) 4476 PopCleanupBlock(); 4477 return ret; 4478 } 4479 4480 case ABIArgInfo::Ignore: 4481 // If we are ignoring an argument that had a result, make sure to 4482 // construct the appropriate return value for our caller. 4483 return GetUndefRValue(RetTy); 4484 4485 case ABIArgInfo::Extend: 4486 case ABIArgInfo::Direct: { 4487 llvm::Type *RetIRTy = ConvertType(RetTy); 4488 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) { 4489 switch (getEvaluationKind(RetTy)) { 4490 case TEK_Complex: { 4491 llvm::Value *Real = Builder.CreateExtractValue(CI, 0); 4492 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1); 4493 return RValue::getComplex(std::make_pair(Real, Imag)); 4494 } 4495 case TEK_Aggregate: { 4496 Address DestPtr = ReturnValue.getValue(); 4497 bool DestIsVolatile = ReturnValue.isVolatile(); 4498 4499 if (!DestPtr.isValid()) { 4500 DestPtr = CreateMemTemp(RetTy, "agg.tmp"); 4501 DestIsVolatile = false; 4502 } 4503 BuildAggStore(*this, CI, DestPtr, DestIsVolatile); 4504 return RValue::getAggregate(DestPtr); 4505 } 4506 case TEK_Scalar: { 4507 // If the argument doesn't match, perform a bitcast to coerce it. This 4508 // can happen due to trivial type mismatches. 4509 llvm::Value *V = CI; 4510 if (V->getType() != RetIRTy) 4511 V = Builder.CreateBitCast(V, RetIRTy); 4512 return RValue::get(V); 4513 } 4514 } 4515 llvm_unreachable("bad evaluation kind"); 4516 } 4517 4518 Address DestPtr = ReturnValue.getValue(); 4519 bool DestIsVolatile = ReturnValue.isVolatile(); 4520 4521 if (!DestPtr.isValid()) { 4522 DestPtr = CreateMemTemp(RetTy, "coerce"); 4523 DestIsVolatile = false; 4524 } 4525 4526 // If the value is offset in memory, apply the offset now. 4527 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI); 4528 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this); 4529 4530 return convertTempToRValue(DestPtr, RetTy, SourceLocation()); 4531 } 4532 4533 case ABIArgInfo::Expand: 4534 llvm_unreachable("Invalid ABI kind for return argument"); 4535 } 4536 4537 llvm_unreachable("Unhandled ABIArgInfo::Kind"); 4538 } (); 4539 4540 // Emit the assume_aligned check on the return value. 
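  // For example (a sketch, function names are illustrative):
  //
  //   __attribute__((assume_aligned(64))) void *alloc64(size_t n);
  //   __attribute__((alloc_align(2))) void *alloc_to(size_t n, size_t align);
  //
  // For the first, the constant 64 is used below; for the second, the
  // alignment comes from the already-evaluated second call argument. Either
  // way EmitAlignmentAssumption materializes an llvm.assume about the
  // returned pointer.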
4541 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); 4542 if (Ret.isScalar() && TargetDecl) { 4543 if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) { 4544 llvm::Value *OffsetValue = nullptr; 4545 if (const auto *Offset = AA->getOffset()) 4546 OffsetValue = EmitScalarExpr(Offset); 4547 4548 llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment()); 4549 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment); 4550 EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(), 4551 AlignmentCI->getZExtValue(), OffsetValue); 4552 } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) { 4553 llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()] 4554 .getRValue(*this) 4555 .getScalarVal(); 4556 EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(), 4557 AlignmentVal); 4558 } 4559 } 4560 4561 return Ret; 4562 } 4563 4564 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const { 4565 if (isVirtual()) { 4566 const CallExpr *CE = getVirtualCallExpr(); 4567 return CGF.CGM.getCXXABI().getVirtualFunctionPointer( 4568 CGF, getVirtualMethodDecl(), getThisAddress(), getFunctionType(), 4569 CE ? CE->getBeginLoc() : SourceLocation()); 4570 } 4571 4572 return *this; 4573 } 4574 4575 /* VarArg handling */ 4576 4577 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) { 4578 VAListAddr = VE->isMicrosoftABI() 4579 ? EmitMSVAListRef(VE->getSubExpr()) 4580 : EmitVAListRef(VE->getSubExpr()); 4581 QualType Ty = VE->getType(); 4582 if (VE->isMicrosoftABI()) 4583 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty); 4584 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty); 4585 } 4586