//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel:
    return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
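/// For example, for `void S::f() const`, the derived type is simply 'S *':
/// the const qualifier is dropped, and only the method's address-space
/// qualifier, if any, is honored.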
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos, Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(DeriveThisType(MD->getParent(), MD));

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgs AddedArgs =
      TheCXXABI.buildStructorSignature(GD, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = PassProtoArgs
                              ? RequiredArgs::forPrototypePlus(
                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
                              : RequiredArgs::All;

  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
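/// Instance methods are forwarded to arrangeCXXMethodDeclaration; a function
/// declared without a prototype is arranged as non-variadic (see below).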
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
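  // C++ constructors and destructors carry ABI-specific extra arguments, so
  // they are arranged by arrangeCXXStructorDeclaration rather than the
  // ordinary declaration path.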
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
    return arrangeCXXStructorDeclaration(GD);

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(DeriveThisType(RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

    // If we don't have a prototype at all, but we're supposed to
    // explicitly use the variadic convention for unprototyped calls,
    // treat all of the arguments as required but preserve the nominal
    // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 /*instanceMethod*/ false, /*chainCall*/ false,
                                 argTypes, proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                              ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with fewer args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  assert(!required.allowsOptionalArgs() ||
         required.getNumRequiredArgs() <= argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new (buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->CmseNSCall = info.getCmseNSCall();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return std::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return std::make_unique<RecordExpansion>(std::move(Bases),
                                             std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return std::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                         llvm::Function::arg_iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress(*this);
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
    // primitive store.
    assert(isa<NoExpansion>(Exp.get()));
    if (LV.isBitField())
      EmitStoreThroughLValue(RValue::get(&*AI++), LV);
    else
      EmitStoreOfScalar(&*AI++, LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to gep into the struct to get at
/// its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src,
                                    Ty->getPointerTo(Src.getAddressSpace()));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
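  // The temporary has the (larger) destination type; only SrcSize bytes are
  // copied into it, so the remaining bits of the loaded value are undefined,
  // as documented above.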
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  CGF.Builder.CreateMemCpy(Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
                           Src.getPointer(), Src.getAlignment().getAsAlign(),
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize));
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getElementType();
  }

  llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
  llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
  if (SrcPtrTy && DstPtrTy &&
      SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
    Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    CGF.Builder.CreateMemCpy(Dst.getPointer(), Dst.getAlignment().getAsAlign(),
                             Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize));
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                           CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  const auto &RI = FI.getReturnInfo();
  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
(void)Inserted; 1558 assert(Inserted && "Recursively being processed?"); 1559 1560 llvm::Type *resultType = nullptr; 1561 const ABIArgInfo &retAI = FI.getReturnInfo(); 1562 switch (retAI.getKind()) { 1563 case ABIArgInfo::Expand: 1564 llvm_unreachable("Invalid ABI kind for return argument"); 1565 1566 case ABIArgInfo::Extend: 1567 case ABIArgInfo::Direct: 1568 resultType = retAI.getCoerceToType(); 1569 break; 1570 1571 case ABIArgInfo::InAlloca: 1572 if (retAI.getInAllocaSRet()) { 1573 // sret things on win32 aren't void, they return the sret pointer. 1574 QualType ret = FI.getReturnType(); 1575 llvm::Type *ty = ConvertType(ret); 1576 unsigned addressSpace = Context.getTargetAddressSpace(ret); 1577 resultType = llvm::PointerType::get(ty, addressSpace); 1578 } else { 1579 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1580 } 1581 break; 1582 1583 case ABIArgInfo::Indirect: 1584 case ABIArgInfo::Ignore: 1585 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1586 break; 1587 1588 case ABIArgInfo::CoerceAndExpand: 1589 resultType = retAI.getUnpaddedCoerceAndExpandType(); 1590 break; 1591 } 1592 1593 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); 1594 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); 1595 1596 // Add type for sret argument. 1597 if (IRFunctionArgs.hasSRetArg()) { 1598 QualType Ret = FI.getReturnType(); 1599 llvm::Type *Ty = ConvertType(Ret); 1600 unsigned AddressSpace = Context.getTargetAddressSpace(Ret); 1601 ArgTypes[IRFunctionArgs.getSRetArgNo()] = 1602 llvm::PointerType::get(Ty, AddressSpace); 1603 } 1604 1605 // Add type for inalloca argument. 1606 if (IRFunctionArgs.hasInallocaArg()) { 1607 auto ArgStruct = FI.getArgStruct(); 1608 assert(ArgStruct); 1609 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); 1610 } 1611 1612 // Add in all of the required arguments. 1613 unsigned ArgNo = 0; 1614 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 1615 ie = it + FI.getNumRequiredArgs(); 1616 for (; it != ie; ++it, ++ArgNo) { 1617 const ABIArgInfo &ArgInfo = it->info; 1618 1619 // Insert a padding type to ensure proper alignment. 1620 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 1621 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 1622 ArgInfo.getPaddingType(); 1623 1624 unsigned FirstIRArg, NumIRArgs; 1625 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1626 1627 switch (ArgInfo.getKind()) { 1628 case ABIArgInfo::Ignore: 1629 case ABIArgInfo::InAlloca: 1630 assert(NumIRArgs == 0); 1631 break; 1632 1633 case ABIArgInfo::Indirect: { 1634 assert(NumIRArgs == 1); 1635 // indirect arguments are always on the stack, which is alloca addr space. 1636 llvm::Type *LTy = ConvertTypeForMem(it->type); 1637 ArgTypes[FirstIRArg] = LTy->getPointerTo( 1638 CGM.getDataLayout().getAllocaAddrSpace()); 1639 break; 1640 } 1641 1642 case ABIArgInfo::Extend: 1643 case ABIArgInfo::Direct: { 1644 // Fast-isel and the optimizer generally like scalar values better than 1645 // FCAs, so we flatten them if this is safe to do for this argument. 
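// For illustration, assuming a coercion type of { i32, i32 }: the flattened form passes two scalar i32 IR arguments (named %x.coerce0 and %x.coerce1 by the prolog code further below) instead of a single first-class aggregate.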
1646 llvm::Type *argType = ArgInfo.getCoerceToType(); 1647 llvm::StructType *st = dyn_cast<llvm::StructType>(argType); 1648 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 1649 assert(NumIRArgs == st->getNumElements()); 1650 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) 1651 ArgTypes[FirstIRArg + i] = st->getElementType(i); 1652 } else { 1653 assert(NumIRArgs == 1); 1654 ArgTypes[FirstIRArg] = argType; 1655 } 1656 break; 1657 } 1658 1659 case ABIArgInfo::CoerceAndExpand: { 1660 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1661 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { 1662 *ArgTypesIter++ = EltTy; 1663 } 1664 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1665 break; 1666 } 1667 1668 case ABIArgInfo::Expand: 1669 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1670 getExpandedTypes(it->type, ArgTypesIter); 1671 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1672 break; 1673 } 1674 } 1675 1676 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; 1677 assert(Erased && "Not in set?"); 1678 1679 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic()); 1680 } 1681 1682 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { 1683 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 1684 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 1685 1686 if (!isFuncTypeConvertible(FPT)) 1687 return llvm::StructType::get(getLLVMContext()); 1688 1689 return GetFunctionType(GD); 1690 } 1691 1692 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, 1693 llvm::AttrBuilder &FuncAttrs, 1694 const FunctionProtoType *FPT) { 1695 if (!FPT) 1696 return; 1697 1698 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && 1699 FPT->isNothrow()) 1700 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1701 } 1702 1703 void CodeGenModule::getDefaultFunctionAttributes(StringRef Name, 1704 bool HasOptnone, 1705 bool AttrOnCallSite, 1706 llvm::AttrBuilder &FuncAttrs) { 1707 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. 1708 if (!HasOptnone) { 1709 if (CodeGenOpts.OptimizeSize) 1710 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); 1711 if (CodeGenOpts.OptimizeSize == 2) 1712 FuncAttrs.addAttribute(llvm::Attribute::MinSize); 1713 } 1714 1715 if (CodeGenOpts.DisableRedZone) 1716 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); 1717 if (CodeGenOpts.IndirectTlsSegRefs) 1718 FuncAttrs.addAttribute("indirect-tls-seg-refs"); 1719 if (CodeGenOpts.NoImplicitFloat) 1720 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); 1721 1722 if (AttrOnCallSite) { 1723 // Attributes that should go on the call site only. 
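// For example, under -fno-builtin (or -fno-builtin-<name> matching this callee) the call receives the `nobuiltin` IR attribute, so the optimizer will not recognize it as the corresponding library builtin.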
1724 if (!CodeGenOpts.SimplifyLibCalls || 1725 CodeGenOpts.isNoBuiltinFunc(Name.data())) 1726 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); 1727 if (!CodeGenOpts.TrapFuncName.empty()) 1728 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName); 1729 } else { 1730 StringRef FpKind; 1731 switch (CodeGenOpts.getFramePointer()) { 1732 case CodeGenOptions::FramePointerKind::None: 1733 FpKind = "none"; 1734 break; 1735 case CodeGenOptions::FramePointerKind::NonLeaf: 1736 FpKind = "non-leaf"; 1737 break; 1738 case CodeGenOptions::FramePointerKind::All: 1739 FpKind = "all"; 1740 break; 1741 } 1742 FuncAttrs.addAttribute("frame-pointer", FpKind); 1743 1744 FuncAttrs.addAttribute("less-precise-fpmad", 1745 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD)); 1746 1747 if (CodeGenOpts.NullPointerIsValid) 1748 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid); 1749 1750 if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE()) 1751 FuncAttrs.addAttribute("denormal-fp-math", 1752 CodeGenOpts.FPDenormalMode.str()); 1753 if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) { 1754 FuncAttrs.addAttribute( 1755 "denormal-fp-math-f32", 1756 CodeGenOpts.FP32DenormalMode.str()); 1757 } 1758 1759 FuncAttrs.addAttribute("no-trapping-math", 1760 llvm::toStringRef(CodeGenOpts.NoTrappingMath)); 1761 1762 // Strict (compliant) code is the default, so only add this attribute to 1763 // indicate that we are trying to work around a problem case. 1764 if (!CodeGenOpts.StrictFloatCastOverflow) 1765 FuncAttrs.addAttribute("strict-float-cast-overflow", "false"); 1766 1767 // TODO: Are these all needed? 1768 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags. 1769 FuncAttrs.addAttribute("no-infs-fp-math", 1770 llvm::toStringRef(CodeGenOpts.NoInfsFPMath)); 1771 FuncAttrs.addAttribute("no-nans-fp-math", 1772 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath)); 1773 FuncAttrs.addAttribute("unsafe-fp-math", 1774 llvm::toStringRef(CodeGenOpts.UnsafeFPMath)); 1775 FuncAttrs.addAttribute("use-soft-float", 1776 llvm::toStringRef(CodeGenOpts.SoftFloat)); 1777 FuncAttrs.addAttribute("stack-protector-buffer-size", 1778 llvm::utostr(CodeGenOpts.SSPBufferSize)); 1779 FuncAttrs.addAttribute("no-signed-zeros-fp-math", 1780 llvm::toStringRef(CodeGenOpts.NoSignedZeros)); 1781 FuncAttrs.addAttribute( 1782 "correctly-rounded-divide-sqrt-fp-math", 1783 llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt)); 1784 1785 // TODO: Reciprocal estimate codegen options should apply to instructions?
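// For example, -mrecip=divf,sqrtd would surface below as the string attribute "reciprocal-estimates"="divf,sqrtd" for the backend to consume.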
1786 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals; 1787 if (!Recips.empty()) 1788 FuncAttrs.addAttribute("reciprocal-estimates", 1789 llvm::join(Recips, ",")); 1790 1791 if (!CodeGenOpts.PreferVectorWidth.empty() && 1792 CodeGenOpts.PreferVectorWidth != "none") 1793 FuncAttrs.addAttribute("prefer-vector-width", 1794 CodeGenOpts.PreferVectorWidth); 1795 1796 if (CodeGenOpts.StackRealignment) 1797 FuncAttrs.addAttribute("stackrealign"); 1798 if (CodeGenOpts.Backchain) 1799 FuncAttrs.addAttribute("backchain"); 1800 if (CodeGenOpts.EnableSegmentedStacks) 1801 FuncAttrs.addAttribute("split-stack"); 1802 1803 if (CodeGenOpts.SpeculativeLoadHardening) 1804 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening); 1805 } 1806 1807 if (getLangOpts().assumeFunctionsAreConvergent()) { 1808 // Conservatively, mark all functions and calls in CUDA and OpenCL as 1809 // convergent (meaning, they may call an intrinsically convergent op, such 1810 // as __syncthreads() / barrier(), and so can't have certain optimizations 1811 // applied around them). LLVM will remove this attribute where it safely 1812 // can. 1813 FuncAttrs.addAttribute(llvm::Attribute::Convergent); 1814 } 1815 1816 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { 1817 // Exceptions aren't supported in CUDA device code. 1818 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1819 } 1820 1821 for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) { 1822 StringRef Var, Value; 1823 std::tie(Var, Value) = Attr.split('='); 1824 FuncAttrs.addAttribute(Var, Value); 1825 } 1826 } 1827 1828 void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) { 1829 llvm::AttrBuilder FuncAttrs; 1830 getDefaultFunctionAttributes(F.getName(), F.hasOptNone(), 1831 /* AttrOnCallSite = */ false, FuncAttrs); 1832 // TODO: call GetCPUAndFeaturesAttributes? 1833 F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs); 1834 } 1835 1836 void CodeGenModule::addDefaultFunctionDefinitionAttributes( 1837 llvm::AttrBuilder &attrs) { 1838 getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false, 1839 /*for call*/ false, attrs); 1840 GetCPUAndFeaturesAttributes(GlobalDecl(), attrs); 1841 } 1842 1843 static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, 1844 const LangOptions &LangOpts, 1845 const NoBuiltinAttr *NBA = nullptr) { 1846 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) { 1847 SmallString<32> AttributeName; 1848 AttributeName += "no-builtin-"; 1849 AttributeName += BuiltinName; 1850 FuncAttrs.addAttribute(AttributeName); 1851 }; 1852 1853 // First, handle the language options passed through -fno-builtin. 1854 if (LangOpts.NoBuiltin) { 1855 // -fno-builtin disables them all. 1856 FuncAttrs.addAttribute("no-builtins"); 1857 return; 1858 } 1859 1860 // Then, add attributes for builtins specified through -fno-builtin-<name>. 1861 llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr); 1862 1863 // Now, let's check the __attribute__((no_builtin("..."))) attribute added to 1864 // the source. 1865 if (!NBA) 1866 return; 1867 1868 // If there is a wildcard in the builtin names specified through the 1869 // attribute, disable them all. 1870 if (llvm::is_contained(NBA->builtinNames(), "*")) { 1871 FuncAttrs.addAttribute("no-builtins"); 1872 return; 1873 } 1874 1875 // And last, add the rest of the builtin names. 1876 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr); 1877 } 1878 1879 /// Construct the IR attribute list of a function or call.
1880 /// 1881 /// When adding an attribute, please consider where it should be handled: 1882 /// 1883 /// - getDefaultFunctionAttributes is for attributes that are essentially 1884 /// part of the global target configuration (but perhaps can be 1885 /// overridden on a per-function basis). Adding attributes there 1886 /// will cause them to also be set in frontends that build on Clang's 1887 /// target-configuration logic, as well as for code defined in library 1888 /// modules such as CUDA's libdevice. 1889 /// 1890 /// - ConstructAttributeList builds on top of getDefaultFunctionAttributes 1891 /// and adds declaration-specific, convention-specific, and 1892 /// frontend-specific logic. The last is of particular importance: 1893 /// attributes that restrict how the frontend generates code must be 1894 /// added here rather than getDefaultFunctionAttributes. 1895 /// 1896 void CodeGenModule::ConstructAttributeList( 1897 StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo, 1898 llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) { 1899 llvm::AttrBuilder FuncAttrs; 1900 llvm::AttrBuilder RetAttrs; 1901 1902 // Collect function IR attributes from the CC lowering. 1903 // We'll collect the parameter and result attributes later. 1904 CallingConv = FI.getEffectiveCallingConvention(); 1905 if (FI.isNoReturn()) 1906 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1907 if (FI.isCmseNSCall()) 1908 FuncAttrs.addAttribute("cmse_nonsecure_call"); 1909 1910 // Collect function IR attributes from the callee prototype if we have one. 1911 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs, 1912 CalleeInfo.getCalleeFunctionProtoType()); 1913 1914 const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl(); 1915 1916 bool HasOptnone = false; 1917 // The NoBuiltinAttr attached to the target FunctionDecl. 1918 const NoBuiltinAttr *NBA = nullptr; 1919 1920 // Collect function IR attributes based on declaration-specific 1921 // information. 1922 // FIXME: handle sseregparm someday... 1923 if (TargetDecl) { 1924 if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) 1925 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice); 1926 if (TargetDecl->hasAttr<NoThrowAttr>()) 1927 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1928 if (TargetDecl->hasAttr<NoReturnAttr>()) 1929 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1930 if (TargetDecl->hasAttr<ColdAttr>()) 1931 FuncAttrs.addAttribute(llvm::Attribute::Cold); 1932 if (TargetDecl->hasAttr<NoDuplicateAttr>()) 1933 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate); 1934 if (TargetDecl->hasAttr<ConvergentAttr>()) 1935 FuncAttrs.addAttribute(llvm::Attribute::Convergent); 1936 1937 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { 1938 AddAttributesFromFunctionProtoType( 1939 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>()); 1940 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) { 1941 // A sane operator new returns a non-aliasing pointer. 1942 auto Kind = Fn->getDeclName().getCXXOverloadedOperator(); 1943 if (getCodeGenOpts().AssumeSaneOperatorNew && 1944 (Kind == OO_New || Kind == OO_Array_New)) 1945 RetAttrs.addAttribute(llvm::Attribute::NoAlias); 1946 } 1947 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn); 1948 const bool IsVirtualCall = MD && MD->isVirtual(); 1949 // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a 1950 // virtual function. These attributes are not inherited by overrides.
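// For example, an override that returns may legitimately replace a [[noreturn]] virtual, so marking the virtual call site noreturn would be unsound.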
1951 if (!(AttrOnCallSite && IsVirtualCall)) { 1952 if (Fn->isNoReturn()) 1953 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1954 NBA = Fn->getAttr<NoBuiltinAttr>(); 1955 } 1956 } 1957 1958 // 'const', 'pure' and 'noalias' attributed functions are also nounwind. 1959 if (TargetDecl->hasAttr<ConstAttr>()) { 1960 FuncAttrs.addAttribute(llvm::Attribute::ReadNone); 1961 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1962 } else if (TargetDecl->hasAttr<PureAttr>()) { 1963 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly); 1964 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1965 } else if (TargetDecl->hasAttr<NoAliasAttr>()) { 1966 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly); 1967 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1968 } 1969 if (TargetDecl->hasAttr<RestrictAttr>()) 1970 RetAttrs.addAttribute(llvm::Attribute::NoAlias); 1971 if (TargetDecl->hasAttr<ReturnsNonNullAttr>() && 1972 !CodeGenOpts.NullPointerIsValid) 1973 RetAttrs.addAttribute(llvm::Attribute::NonNull); 1974 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>()) 1975 FuncAttrs.addAttribute("no_caller_saved_registers"); 1976 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>()) 1977 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck); 1978 1979 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>(); 1980 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) { 1981 Optional<unsigned> NumElemsParam; 1982 if (AllocSize->getNumElemsParam().isValid()) 1983 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex(); 1984 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(), 1985 NumElemsParam); 1986 } 1987 1988 if (TargetDecl->hasAttr<OpenCLKernelAttr>()) { 1989 if (getLangOpts().OpenCLVersion <= 120) { 1990 // OpenCL v1.2 Work groups are always uniform. 1991 FuncAttrs.addAttribute("uniform-work-group-size", "true"); 1992 } else { 1993 // OpenCL v2.0 Work groups may or may not be uniform. 1994 // The '-cl-uniform-work-group-size' compile option gives the compiler 1995 // a hint that the global work-size is a multiple of 1996 // the work-group size specified to clEnqueueNDRangeKernel 1997 // (i.e. work groups are uniform). 1998 FuncAttrs.addAttribute("uniform-work-group-size", 1999 llvm::toStringRef(CodeGenOpts.UniformWGSize)); 2000 } 2001 } 2002 } 2003 2004 // Attach "no-builtins" attributes to: 2005 // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>". 2006 // * definitions: "no-builtins" or "no-builtin-<name>" only. 2007 // The attributes can come from: 2008 // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name> 2009 // * FunctionDecl attributes: __attribute__((no_builtin(...))) 2010 addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA); 2011 2012 // Collect function IR attributes based on global settings. 2013 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs); 2014 2015 // Override some default IR attributes based on declaration-specific 2016 // information. 2017 if (TargetDecl) { 2018 if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>()) 2019 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening); 2020 if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>()) 2021 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening); 2022 if (TargetDecl->hasAttr<NoSplitStackAttr>()) 2023 FuncAttrs.removeAttribute("split-stack"); 2024 2025 // Add NonLazyBind attribute to function declarations when -fno-plt 2026 // is used.
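// (With nonlazybind, e.g. on x86-64 ELF, calls to such a declaration go directly through the GOT rather than the lazily-bound PLT stub.)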
2027 // FIXME: what if we just haven't processed the function definition 2028 // yet, or if it's an external definition like C99 inline? 2029 if (CodeGenOpts.NoPLT) { 2030 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { 2031 if (!Fn->isDefined() && !AttrOnCallSite) { 2032 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind); 2033 } 2034 } 2035 } 2036 } 2037 2038 // Collect non-call-site function IR attributes from declaration-specific 2039 // information. 2040 if (!AttrOnCallSite) { 2041 if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>()) 2042 FuncAttrs.addAttribute("cmse_nonsecure_entry"); 2043 2044 // Whether tail calls should be disabled. 2045 auto shouldDisableTailCalls = [&] { 2046 // Should this be honored in getDefaultFunctionAttributes? 2047 if (CodeGenOpts.DisableTailCalls) 2048 return true; 2049 2050 if (!TargetDecl) 2051 return false; 2052 2053 if (TargetDecl->hasAttr<DisableTailCallsAttr>() || 2054 TargetDecl->hasAttr<AnyX86InterruptAttr>()) 2055 return true; 2056 2057 if (CodeGenOpts.NoEscapingBlockTailCalls) { 2058 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl)) 2059 if (!BD->doesNotEscape()) 2060 return true; 2061 } 2062 2063 return false; 2064 }; 2065 FuncAttrs.addAttribute("disable-tail-calls", 2066 llvm::toStringRef(shouldDisableTailCalls())); 2067 2068 // CPU/feature overrides. addDefaultFunctionDefinitionAttributes 2069 // handles these separately to set them based on the global defaults. 2070 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs); 2071 } 2072 2073 // Collect attributes from arguments and return values. 2074 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); 2075 2076 QualType RetTy = FI.getReturnType(); 2077 const ABIArgInfo &RetAI = FI.getReturnInfo(); 2078 switch (RetAI.getKind()) { 2079 case ABIArgInfo::Extend: 2080 if (RetAI.isSignExt()) 2081 RetAttrs.addAttribute(llvm::Attribute::SExt); 2082 else 2083 RetAttrs.addAttribute(llvm::Attribute::ZExt); 2084 LLVM_FALLTHROUGH; 2085 case ABIArgInfo::Direct: 2086 if (RetAI.getInReg()) 2087 RetAttrs.addAttribute(llvm::Attribute::InReg); 2088 break; 2089 case ABIArgInfo::Ignore: 2090 break; 2091 2092 case ABIArgInfo::InAlloca: 2093 case ABIArgInfo::Indirect: { 2094 // inalloca and sret disable readnone and readonly 2095 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2096 .removeAttribute(llvm::Attribute::ReadNone); 2097 break; 2098 } 2099 2100 case ABIArgInfo::CoerceAndExpand: 2101 break; 2102 2103 case ABIArgInfo::Expand: 2104 llvm_unreachable("Invalid ABI kind for return argument"); 2105 } 2106 2107 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { 2108 QualType PTy = RefTy->getPointeeType(); 2109 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 2110 RetAttrs.addDereferenceableAttr( 2111 getMinimumObjectSize(PTy).getQuantity()); 2112 else if (getContext().getTargetAddressSpace(PTy) == 0 && 2113 !CodeGenOpts.NullPointerIsValid) 2114 RetAttrs.addAttribute(llvm::Attribute::NonNull); 2115 } 2116 2117 bool hasUsedSRet = false; 2118 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs()); 2119 2120 // Attach attributes to sret.
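// For illustration: a function returning a large struct indirectly ends up as something like define void @f(%struct.S* sret align 4 %agg.result, ...), modulo target-specific alignment and inreg.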
2121 if (IRFunctionArgs.hasSRetArg()) { 2122 llvm::AttrBuilder SRETAttrs; 2123 SRETAttrs.addAttribute(llvm::Attribute::StructRet); 2124 hasUsedSRet = true; 2125 if (RetAI.getInReg()) 2126 SRETAttrs.addAttribute(llvm::Attribute::InReg); 2127 SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity()); 2128 ArgAttrs[IRFunctionArgs.getSRetArgNo()] = 2129 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs); 2130 } 2131 2132 // Attach attributes to inalloca argument. 2133 if (IRFunctionArgs.hasInallocaArg()) { 2134 llvm::AttrBuilder Attrs; 2135 Attrs.addAttribute(llvm::Attribute::InAlloca); 2136 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] = 2137 llvm::AttributeSet::get(getLLVMContext(), Attrs); 2138 } 2139 2140 unsigned ArgNo = 0; 2141 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 2142 E = FI.arg_end(); 2143 I != E; ++I, ++ArgNo) { 2144 QualType ParamType = I->type; 2145 const ABIArgInfo &AI = I->info; 2146 llvm::AttrBuilder Attrs; 2147 2148 // Add attribute for padding argument, if necessary. 2149 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 2150 if (AI.getPaddingInReg()) { 2151 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 2152 llvm::AttributeSet::get( 2153 getLLVMContext(), 2154 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg)); 2155 } 2156 } 2157 2158 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 2159 // have the corresponding parameter variable. It doesn't make 2160 // sense to do it here because parameters are so messed up. 2161 switch (AI.getKind()) { 2162 case ABIArgInfo::Extend: 2163 if (AI.isSignExt()) 2164 Attrs.addAttribute(llvm::Attribute::SExt); 2165 else 2166 Attrs.addAttribute(llvm::Attribute::ZExt); 2167 LLVM_FALLTHROUGH; 2168 case ABIArgInfo::Direct: 2169 if (ArgNo == 0 && FI.isChainCall()) 2170 Attrs.addAttribute(llvm::Attribute::Nest); 2171 else if (AI.getInReg()) 2172 Attrs.addAttribute(llvm::Attribute::InReg); 2173 break; 2174 2175 case ABIArgInfo::Indirect: { 2176 if (AI.getInReg()) 2177 Attrs.addAttribute(llvm::Attribute::InReg); 2178 2179 if (AI.getIndirectByVal()) 2180 Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType)); 2181 2182 CharUnits Align = AI.getIndirectAlign(); 2183 2184 // In a byval argument, it is important that the required 2185 // alignment of the type is honored, as LLVM might be creating a 2186 // *new* stack object, and needs to know what alignment to give 2187 // it. (Sometimes it can deduce a sensible alignment on its own, 2188 // but not if clang decides it must emit a packed struct, or the 2189 // user specifies increased alignment requirements.) 2190 // 2191 // This is different from indirect *not* byval, where the object 2192 // exists already, and the align attribute is purely 2193 // informative. 2194 assert(!Align.isZero()); 2195 2196 // For now, only add this when we have a byval argument. 2197 // TODO: be less lazy about updating test cases. 2198 if (AI.getIndirectByVal()) 2199 Attrs.addAlignmentAttr(Align.getQuantity()); 2200 2201 // byval disables readnone and readonly. 2202 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2203 .removeAttribute(llvm::Attribute::ReadNone); 2204 break; 2205 } 2206 case ABIArgInfo::Ignore: 2207 case ABIArgInfo::Expand: 2208 case ABIArgInfo::CoerceAndExpand: 2209 break; 2210 2211 case ABIArgInfo::InAlloca: 2212 // inalloca disables readnone and readonly. 
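// (An inalloca argument is memory the caller initializes and the callee reads, so a function-wide readnone/readonly claim would be incorrect.)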
2213 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2214 .removeAttribute(llvm::Attribute::ReadNone); 2215 continue; 2216 } 2217 2218 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { 2219 QualType PTy = RefTy->getPointeeType(); 2220 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 2221 Attrs.addDereferenceableAttr( 2222 getMinimumObjectSize(PTy).getQuantity()); 2223 else if (getContext().getTargetAddressSpace(PTy) == 0 && 2224 !CodeGenOpts.NullPointerIsValid) 2225 Attrs.addAttribute(llvm::Attribute::NonNull); 2226 } 2227 2228 switch (FI.getExtParameterInfo(ArgNo).getABI()) { 2229 case ParameterABI::Ordinary: 2230 break; 2231 2232 case ParameterABI::SwiftIndirectResult: { 2233 // Add 'sret' if we haven't already used it for something, but 2234 // only if the result is void. 2235 if (!hasUsedSRet && RetTy->isVoidType()) { 2236 Attrs.addAttribute(llvm::Attribute::StructRet); 2237 hasUsedSRet = true; 2238 } 2239 2240 // Add 'noalias' in either case. 2241 Attrs.addAttribute(llvm::Attribute::NoAlias); 2242 2243 // Add 'dereferenceable' and 'alignment'. 2244 auto PTy = ParamType->getPointeeType(); 2245 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { 2246 auto info = getContext().getTypeInfoInChars(PTy); 2247 Attrs.addDereferenceableAttr(info.first.getQuantity()); 2248 Attrs.addAttribute(llvm::Attribute::getWithAlignment( 2249 getLLVMContext(), info.second.getAsAlign())); 2250 } 2251 break; 2252 } 2253 2254 case ParameterABI::SwiftErrorResult: 2255 Attrs.addAttribute(llvm::Attribute::SwiftError); 2256 break; 2257 2258 case ParameterABI::SwiftContext: 2259 Attrs.addAttribute(llvm::Attribute::SwiftSelf); 2260 break; 2261 } 2262 2263 if (FI.getExtParameterInfo(ArgNo).isNoEscape()) 2264 Attrs.addAttribute(llvm::Attribute::NoCapture); 2265 2266 if (Attrs.hasAttributes()) { 2267 unsigned FirstIRArg, NumIRArgs; 2268 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2269 for (unsigned i = 0; i < NumIRArgs; i++) 2270 ArgAttrs[FirstIRArg + i] = 2271 llvm::AttributeSet::get(getLLVMContext(), Attrs); 2272 } 2273 } 2274 assert(ArgNo == FI.arg_size()); 2275 2276 AttrList = llvm::AttributeList::get( 2277 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs), 2278 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs); 2279 } 2280 2281 /// An argument came in as a promoted argument; demote it back to its 2282 /// declared type. 2283 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 2284 const VarDecl *var, 2285 llvm::Value *value) { 2286 llvm::Type *varType = CGF.ConvertType(var->getType()); 2287 2288 // This can happen with promotions that actually don't change the 2289 // underlying type, like the enum promotions. 2290 if (value->getType() == varType) return value; 2291 2292 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 2293 && "unexpected promotion type"); 2294 2295 if (isa<llvm::IntegerType>(varType)) 2296 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 2297 2298 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 2299 } 2300 2301 /// Returns the attribute (either parameter attribute, or function 2302 /// attribute), which declares argument ArgNo to be non-null. 
2303 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, 2304 QualType ArgType, unsigned ArgNo) { 2305 // FIXME: __attribute__((nonnull)) can also be applied to: 2306 // - references to pointers, where the pointee is known to be 2307 // nonnull (apparently a Clang extension) 2308 // - transparent unions containing pointers 2309 // In the former case, LLVM IR cannot represent the constraint. In 2310 // the latter case, we have no guarantee that the transparent union 2311 // is in fact passed as a pointer. 2312 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) 2313 return nullptr; 2314 // First, check attribute on parameter itself. 2315 if (PVD) { 2316 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) 2317 return ParmNNAttr; 2318 } 2319 // Check function attributes. 2320 if (!FD) 2321 return nullptr; 2322 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) { 2323 if (NNAttr->isNonNull(ArgNo)) 2324 return NNAttr; 2325 } 2326 return nullptr; 2327 } 2328 2329 namespace { 2330 struct CopyBackSwiftError final : EHScopeStack::Cleanup { 2331 Address Temp; 2332 Address Arg; 2333 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {} 2334 void Emit(CodeGenFunction &CGF, Flags flags) override { 2335 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp); 2336 CGF.Builder.CreateStore(errorValue, Arg); 2337 } 2338 }; 2339 } 2340 2341 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, 2342 llvm::Function *Fn, 2343 const FunctionArgList &Args) { 2344 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) 2345 // Naked functions don't have prologues. 2346 return; 2347 2348 // If this is an implicit-return-zero function, go ahead and 2349 // initialize the return value. TODO: it might be nice to have 2350 // a more general mechanism for this that didn't require synthesized 2351 // return statements. 2352 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) { 2353 if (FD->hasImplicitReturnZero()) { 2354 QualType RetTy = FD->getReturnType().getUnqualifiedType(); 2355 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy); 2356 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy); 2357 Builder.CreateStore(Zero, ReturnValue); 2358 } 2359 } 2360 2361 // FIXME: We no longer need the types from FunctionArgList; lift up and 2362 // simplify. 2363 2364 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI); 2365 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs()); 2366 2367 // If we're using inalloca, all the memory arguments are GEPs off of the last 2368 // parameter, which is a pointer to the complete memory area. 2369 Address ArgStruct = Address::invalid(); 2370 if (IRFunctionArgs.hasInallocaArg()) { 2371 ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()), 2372 FI.getArgStructAlignment()); 2373 2374 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo()); 2375 } 2376 2377 // Name the struct return parameter. 2378 if (IRFunctionArgs.hasSRetArg()) { 2379 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo()); 2380 AI->setName("agg.result"); 2381 AI->addAttr(llvm::Attribute::NoAlias); 2382 } 2383 2384 // Track if we received the parameter as a pointer (indirect, byval, or 2385 // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it 2386 // into a local alloca for us. 2387 SmallVector<ParamValue, 16> ArgVals; 2388 ArgVals.reserve(Args.size()); 2389 2390 // Create a pointer value for every parameter declaration.
This usually 2391 // entails copying one or more LLVM IR arguments into an alloca. Don't push 2392 // any cleanups or do anything that might unwind. We do that separately, so 2393 // we can push the cleanups in the correct order for the ABI. 2394 assert(FI.arg_size() == Args.size() && 2395 "Mismatch between function signature & arguments."); 2396 unsigned ArgNo = 0; 2397 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); 2398 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 2399 i != e; ++i, ++info_it, ++ArgNo) { 2400 const VarDecl *Arg = *i; 2401 const ABIArgInfo &ArgI = info_it->info; 2402 2403 bool isPromoted = 2404 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted(); 2405 // We are converting from ABIArgInfo type to VarDecl type directly, unless 2406 // the parameter is promoted. In this case we convert to 2407 // CGFunctionInfo::ArgInfo type with subsequent argument demotion. 2408 QualType Ty = isPromoted ? info_it->type : Arg->getType(); 2409 assert(hasScalarEvaluationKind(Ty) == 2410 hasScalarEvaluationKind(Arg->getType())); 2411 2412 unsigned FirstIRArg, NumIRArgs; 2413 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2414 2415 switch (ArgI.getKind()) { 2416 case ABIArgInfo::InAlloca: { 2417 assert(NumIRArgs == 0); 2418 auto FieldIndex = ArgI.getInAllocaFieldIndex(); 2419 Address V = 2420 Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName()); 2421 if (ArgI.getInAllocaIndirect()) 2422 V = Address(Builder.CreateLoad(V), 2423 getContext().getTypeAlignInChars(Ty)); 2424 ArgVals.push_back(ParamValue::forIndirect(V)); 2425 break; 2426 } 2427 2428 case ABIArgInfo::Indirect: { 2429 assert(NumIRArgs == 1); 2430 Address ParamAddr = 2431 Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign()); 2432 2433 if (!hasScalarEvaluationKind(Ty)) { 2434 // Aggregates and complex variables are accessed by reference. All we 2435 // need to do is realign the value, if requested. 2436 Address V = ParamAddr; 2437 if (ArgI.getIndirectRealign()) { 2438 Address AlignedTemp = CreateMemTemp(Ty, "coerce"); 2439 2440 // Copy from the incoming argument pointer to the temporary with the 2441 // appropriate alignment. 2442 // 2443 // FIXME: We should have a common utility for generating an aggregate 2444 // copy. 2445 CharUnits Size = getContext().getTypeSizeInChars(Ty); 2446 Builder.CreateMemCpy( 2447 AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(), 2448 ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(), 2449 llvm::ConstantInt::get(IntPtrTy, Size.getQuantity())); 2450 V = AlignedTemp; 2451 } 2452 ArgVals.push_back(ParamValue::forIndirect(V)); 2453 } else { 2454 // Load scalar value from indirect argument. 2455 llvm::Value *V = 2456 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc()); 2457 2458 if (isPromoted) 2459 V = emitArgumentDemotion(*this, Arg, V); 2460 ArgVals.push_back(ParamValue::forDirect(V)); 2461 } 2462 break; 2463 } 2464 2465 case ABIArgInfo::Extend: 2466 case ABIArgInfo::Direct: { 2467 auto AI = Fn->getArg(FirstIRArg); 2468 llvm::Type *LTy = ConvertType(Arg->getType()); 2469 2470 // Prepare parameter attributes. So far, only attributes for pointer 2471 // parameters are prepared. See 2472 // http://llvm.org/docs/LangRef.html#paramattrs. 2473 if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() && 2474 ArgI.getCoerceToType()->isPointerTy()) { 2475 assert(NumIRArgs == 1); 2476 2477 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) { 2478 // Set `nonnull` attribute if any. 
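// For example, void f(int *p __attribute__((nonnull))) lowers to define void @f(i32* nonnull %p), unless -fno-delete-null-pointer-checks makes null a valid pointer value.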
2479 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), 2480 PVD->getFunctionScopeIndex()) && 2481 !CGM.getCodeGenOpts().NullPointerIsValid) 2482 AI->addAttr(llvm::Attribute::NonNull); 2483 2484 QualType OTy = PVD->getOriginalType(); 2485 if (const auto *ArrTy = 2486 getContext().getAsConstantArrayType(OTy)) { 2487 // A C99 array parameter declaration with the static keyword also 2488 // indicates dereferenceability, and if the size is constant we can 2489 // use the dereferenceable attribute (which requires the size in 2490 // bytes). 2491 if (ArrTy->getSizeModifier() == ArrayType::Static) { 2492 QualType ETy = ArrTy->getElementType(); 2493 uint64_t ArrSize = ArrTy->getSize().getZExtValue(); 2494 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && 2495 ArrSize) { 2496 llvm::AttrBuilder Attrs; 2497 Attrs.addDereferenceableAttr( 2498 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize); 2499 AI->addAttrs(Attrs); 2500 } else if (getContext().getTargetAddressSpace(ETy) == 0 && 2501 !CGM.getCodeGenOpts().NullPointerIsValid) { 2502 AI->addAttr(llvm::Attribute::NonNull); 2503 } 2504 } 2505 } else if (const auto *ArrTy = 2506 getContext().getAsVariableArrayType(OTy)) { 2507 // For C99 VLAs with the static keyword, we don't know the size so 2508 // we can't use the dereferenceable attribute, but in addrspace(0) 2509 // we know that it must be nonnull. 2510 if (ArrTy->getSizeModifier() == VariableArrayType::Static && 2511 !getContext().getTargetAddressSpace(ArrTy->getElementType()) && 2512 !CGM.getCodeGenOpts().NullPointerIsValid) 2513 AI->addAttr(llvm::Attribute::NonNull); 2514 } 2515 2516 // Set `align` attribute if any. 2517 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); 2518 if (!AVAttr) 2519 if (const auto *TOTy = dyn_cast<TypedefType>(OTy)) 2520 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); 2521 if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) { 2522 // If the alignment-assumption sanitizer is enabled, we do *not* add the 2523 // alignment attribute here; instead we emit a normal alignment assumption 2524 // so that the UBSAN check can still do its job. 2525 llvm::Value *AlignmentValue = 2526 EmitScalarExpr(AVAttr->getAlignment()); 2527 llvm::ConstantInt *AlignmentCI = 2528 cast<llvm::ConstantInt>(AlignmentValue); 2529 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(llvm::MaybeAlign( 2530 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)))); 2531 } 2532 } 2533 2534 // Set 'noalias' if an argument type has the `restrict` qualifier. 2535 if (Arg->getType().isRestrictQualified()) 2536 AI->addAttr(llvm::Attribute::NoAlias); 2537 } 2538 2539 // Prepare the argument value. If we have the trivial case, handle it 2540 // with no muss and fuss. 2541 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 2542 ArgI.getCoerceToType() == ConvertType(Ty) && 2543 ArgI.getDirectOffset() == 0) { 2544 assert(NumIRArgs == 1); 2545 2546 // LLVM expects swifterror parameters to be used in very restricted 2547 // ways. Copy the value into a less-restricted temporary.
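// (Per the LangRef, a swifterror value may only be loaded, stored, or passed as a swifterror argument, so the rest of CodeGen cannot use it directly.)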
2548 llvm::Value *V = AI; 2549 if (FI.getExtParameterInfo(ArgNo).getABI() 2550 == ParameterABI::SwiftErrorResult) { 2551 QualType pointeeTy = Ty->getPointeeType(); 2552 assert(pointeeTy->isPointerType()); 2553 Address temp = 2554 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 2555 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy)); 2556 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); 2557 Builder.CreateStore(incomingErrorValue, temp); 2558 V = temp.getPointer(); 2559 2560 // Push a cleanup to copy the value back at the end of the function. 2561 // The convention does not guarantee that the value will be written 2562 // back if the function exits with an unwind exception. 2563 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg); 2564 } 2565 2566 // Ensure the argument is the correct type. 2567 if (V->getType() != ArgI.getCoerceToType()) 2568 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 2569 2570 if (isPromoted) 2571 V = emitArgumentDemotion(*this, Arg, V); 2572 2573 // Because of merging of function types from multiple decls it is 2574 // possible for the type of an argument to not match the corresponding 2575 // type in the function type. Since we are codegening the callee 2576 // in here, add a cast to the argument type. 2577 llvm::Type *LTy = ConvertType(Arg->getType()); 2578 if (V->getType() != LTy) 2579 V = Builder.CreateBitCast(V, LTy); 2580 2581 ArgVals.push_back(ParamValue::forDirect(V)); 2582 break; 2583 } 2584 2585 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), 2586 Arg->getName()); 2587 2588 // Pointer to store into. 2589 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI); 2590 2591 // Fast-isel and the optimizer generally like scalar values better than 2592 // FCAs, so we flatten them if this is safe to do for this argument. 2593 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 2594 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && 2595 STy->getNumElements() > 1) { 2596 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 2597 llvm::Type *DstTy = Ptr.getElementType(); 2598 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 2599 2600 Address AddrToStoreInto = Address::invalid(); 2601 if (SrcSize <= DstSize) { 2602 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy); 2603 } else { 2604 AddrToStoreInto = 2605 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce"); 2606 } 2607 2608 assert(STy->getNumElements() == NumIRArgs); 2609 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2610 auto AI = Fn->getArg(FirstIRArg + i); 2611 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 2612 Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i); 2613 Builder.CreateStore(AI, EltPtr); 2614 } 2615 2616 if (SrcSize > DstSize) { 2617 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize); 2618 } 2619 2620 } else { 2621 // Simple case, just do a coerced store of the argument into the alloca. 2622 assert(NumIRArgs == 1); 2623 auto AI = Fn->getArg(FirstIRArg); 2624 AI->setName(Arg->getName() + ".coerce"); 2625 CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); 2626 } 2627 2628 // Match to what EmitParmDecl is expecting for this type. 
2629 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 2630 llvm::Value *V = 2631 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc()); 2632 if (isPromoted) 2633 V = emitArgumentDemotion(*this, Arg, V); 2634 ArgVals.push_back(ParamValue::forDirect(V)); 2635 } else { 2636 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2637 } 2638 break; 2639 } 2640 2641 case ABIArgInfo::CoerceAndExpand: { 2642 // Reconstruct into a temporary. 2643 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 2644 ArgVals.push_back(ParamValue::forIndirect(alloca)); 2645 2646 auto coercionType = ArgI.getCoerceAndExpandType(); 2647 alloca = Builder.CreateElementBitCast(alloca, coercionType); 2648 2649 unsigned argIndex = FirstIRArg; 2650 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 2651 llvm::Type *eltType = coercionType->getElementType(i); 2652 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) 2653 continue; 2654 2655 auto eltAddr = Builder.CreateStructGEP(alloca, i); 2656 auto elt = Fn->getArg(argIndex++); 2657 Builder.CreateStore(elt, eltAddr); 2658 } 2659 assert(argIndex == FirstIRArg + NumIRArgs); 2660 break; 2661 } 2662 2663 case ABIArgInfo::Expand: { 2664 // If this structure was expanded into multiple arguments then 2665 // we need to create a temporary and reconstruct it from the 2666 // arguments. 2667 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 2668 LValue LV = MakeAddrLValue(Alloca, Ty); 2669 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2670 2671 auto FnArgIter = Fn->arg_begin() + FirstIRArg; 2672 ExpandTypeFromArgs(Ty, LV, FnArgIter); 2673 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs); 2674 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { 2675 auto AI = Fn->getArg(FirstIRArg + i); 2676 AI->setName(Arg->getName() + "." + Twine(i)); 2677 } 2678 break; 2679 } 2680 2681 case ABIArgInfo::Ignore: 2682 assert(NumIRArgs == 0); 2683 // Initialize the local variable appropriately. 2684 if (!hasScalarEvaluationKind(Ty)) { 2685 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty))); 2686 } else { 2687 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType())); 2688 ArgVals.push_back(ParamValue::forDirect(U)); 2689 } 2690 break; 2691 } 2692 } 2693 2694 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 2695 for (int I = Args.size() - 1; I >= 0; --I) 2696 EmitParmDecl(*Args[I], ArgVals[I], I + 1); 2697 } else { 2698 for (unsigned I = 0, E = Args.size(); I != E; ++I) 2699 EmitParmDecl(*Args[I], ArgVals[I], I + 1); 2700 } 2701 } 2702 2703 static void eraseUnusedBitCasts(llvm::Instruction *insn) { 2704 while (insn->use_empty()) { 2705 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn); 2706 if (!bitcast) return; 2707 2708 // This is "safe" because we would have used a ConstantExpr otherwise. 2709 insn = cast<llvm::Instruction>(bitcast->getOperand(0)); 2710 bitcast->eraseFromParent(); 2711 } 2712 } 2713 2714 /// Try to emit a fused autorelease of a return result. 2715 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, 2716 llvm::Value *result) { 2717 // We must be immediately following the cast. 2718 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock(); 2719 if (BB->empty()) return nullptr; 2720 if (&BB->back() != result) return nullptr; 2721 2722 llvm::Type *resultType = result->getType(); 2723 2724 // result is in a BasicBlock and is therefore an Instruction.
2725 llvm::Instruction *generator = cast<llvm::Instruction>(result); 2726 2727 SmallVector<llvm::Instruction *, 4> InstsToKill; 2728 2729 // Look for: 2730 // %generator = bitcast %type1* %generator2 to %type2* 2731 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) { 2732 // We would have emitted this as a constant if the operand weren't 2733 // an Instruction. 2734 generator = cast<llvm::Instruction>(bitcast->getOperand(0)); 2735 2736 // Require the generator to be immediately followed by the cast. 2737 if (generator->getNextNode() != bitcast) 2738 return nullptr; 2739 2740 InstsToKill.push_back(bitcast); 2741 } 2742 2743 // Look for: 2744 // %generator = call i8* @objc_retain(i8* %originalResult) 2745 // or 2746 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult) 2747 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator); 2748 if (!call) return nullptr; 2749 2750 bool doRetainAutorelease; 2751 2752 if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) { 2753 doRetainAutorelease = true; 2754 } else if (call->getCalledOperand() == 2755 CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) { 2756 doRetainAutorelease = false; 2757 2758 // If we emitted an assembly marker for this call (and the 2759 // ARCEntrypoints field should have been set if so), go looking 2760 // for that call. If we can't find it, we can't do this 2761 // optimization. But it should always be the immediately previous 2762 // instruction, unless we needed bitcasts around the call. 2763 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { 2764 llvm::Instruction *prev = call->getPrevNode(); 2765 assert(prev); 2766 if (isa<llvm::BitCastInst>(prev)) { 2767 prev = prev->getPrevNode(); 2768 assert(prev); 2769 } 2770 assert(isa<llvm::CallInst>(prev)); 2771 assert(cast<llvm::CallInst>(prev)->getCalledOperand() == 2772 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); 2773 InstsToKill.push_back(prev); 2774 } 2775 } else { 2776 return nullptr; 2777 } 2778 2779 result = call->getArgOperand(0); 2780 InstsToKill.push_back(call); 2781 2782 // Keep killing bitcasts, for sanity. Note that we no longer care 2783 // about precise ordering as long as there's exactly one use. 2784 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 2785 if (!bitcast->hasOneUse()) break; 2786 InstsToKill.push_back(bitcast); 2787 result = bitcast->getOperand(0); 2788 } 2789 2790 // Delete all the unnecessary instructions, from latest to earliest. 2791 for (auto *I : InstsToKill) 2792 I->eraseFromParent(); 2793 2794 // Do the fused retain/autorelease if we were asked to. 2795 if (doRetainAutorelease) 2796 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 2797 2798 // Cast back to the result type. 2799 return CGF.Builder.CreateBitCast(result, resultType); 2800 } 2801 2802 /// If this is a +1 of the value of an immutable 'self', remove it. 2803 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 2804 llvm::Value *result) { 2805 // This is only applicable to a method with an immutable 'self'. 2806 const ObjCMethodDecl *method = 2807 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 2808 if (!method) return nullptr; 2809 const VarDecl *self = method->getSelfDecl(); 2810 if (!self->getType().isConstQualified()) return nullptr; 2811 2812 // Look for a retain call. 
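// The shape being matched is roughly: %self1 = load i8*, i8** %self; %0 = call i8* @objc_retain(i8* %self1); ...; ret i8* %0 (modulo bitcasts).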
2813 llvm::CallInst *retainCall = 2814 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 2815 if (!retainCall || retainCall->getCalledOperand() != 2816 CGF.CGM.getObjCEntrypoints().objc_retain) 2817 return nullptr; 2818 2819 // Look for an ordinary load of 'self'. 2820 llvm::Value *retainedValue = retainCall->getArgOperand(0); 2821 llvm::LoadInst *load = 2822 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 2823 if (!load || load->isAtomic() || load->isVolatile() || 2824 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) 2825 return nullptr; 2826 2827 // Okay! Burn it all down. This relies for correctness on the 2828 // assumption that the retain is emitted as part of the return and 2829 // that thereafter everything is used "linearly". 2830 llvm::Type *resultType = result->getType(); 2831 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 2832 assert(retainCall->use_empty()); 2833 retainCall->eraseFromParent(); 2834 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 2835 2836 return CGF.Builder.CreateBitCast(load, resultType); 2837 } 2838 2839 /// Emit an ARC autorelease of the result of a function. 2840 /// 2841 /// \return the value to actually return from the function 2842 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 2843 llvm::Value *result) { 2844 // If we're returning 'self', kill the initial retain. This is a 2845 // heuristic attempt to "encourage correctness" in the really unfortunate 2846 // case where we have a return of self during a dealloc and we desperately 2847 // need to avoid the possible autorelease. 2848 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result)) 2849 return self; 2850 2851 // At -O0, try to emit a fused retain/autorelease. 2852 if (CGF.shouldUseFusedARCCalls()) 2853 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result)) 2854 return fused; 2855 2856 return CGF.EmitARCAutoreleaseReturnValue(result); 2857 } 2858 2859 /// Heuristically search for a dominating store to the return-value slot. 2860 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { 2861 // Check if a User is a store whose pointer operand is the ReturnValue. 2862 // We are looking for stores to the ReturnValue, not for stores of the 2863 // ReturnValue to some other location. 2864 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * { 2865 auto *SI = dyn_cast<llvm::StoreInst>(U); 2866 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer()) 2867 return nullptr; 2868 // These aren't actually possible for non-coerced returns, and we 2869 // only care about non-coerced returns on this code path. 2870 assert(!SI->isAtomic() && !SI->isVolatile()); 2871 return SI; 2872 }; 2873 // If there are multiple uses of the return-value slot, just check 2874 // for something immediately preceding the IP. Sometimes this can 2875 // happen with how we generate implicit-returns; it can also happen 2876 // with noreturn cleanups.
2877 if (!CGF.ReturnValue.getPointer()->hasOneUse()) { 2878 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); 2879 if (IP->empty()) return nullptr; 2880 llvm::Instruction *I = &IP->back(); 2881 2882 // Skip lifetime markers 2883 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(), 2884 IE = IP->rend(); 2885 II != IE; ++II) { 2886 if (llvm::IntrinsicInst *Intrinsic = 2887 dyn_cast<llvm::IntrinsicInst>(&*II)) { 2888 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) { 2889 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1); 2890 ++II; 2891 if (II == IE) 2892 break; 2893 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II)) 2894 continue; 2895 } 2896 } 2897 I = &*II; 2898 break; 2899 } 2900 2901 return GetStoreIfValid(I); 2902 } 2903 2904 llvm::StoreInst *store = 2905 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back()); 2906 if (!store) return nullptr; 2907 2908 // Now do a quick-and-dirty dominance check: just walk up the 2909 // single-predecessors chain from the current insertion point. 2910 llvm::BasicBlock *StoreBB = store->getParent(); 2911 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); 2912 while (IP != StoreBB) { 2913 if (!(IP = IP->getSinglePredecessor())) 2914 return nullptr; 2915 } 2916 2917 // Okay, the store's basic block dominates the insertion point; we 2918 // can do our thing. 2919 return store; 2920 } 2921 2922 // Helper functions for EmitCMSEClearRecord 2923 2924 // Set the bits corresponding to a field having width `BitWidth` and located at 2925 // offset `BitOffset` (from the least significant bit) within a storage unit of 2926 // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte. 2927 // Use little-endian layout, i.e. `Bits[0]` is the LSB. 2928 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset, 2929 int BitWidth, int CharWidth) { 2930 assert(CharWidth <= 64); 2931 assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth); 2932 2933 int Pos = 0; 2934 if (BitOffset >= CharWidth) { 2935 Pos += BitOffset / CharWidth; 2936 BitOffset = BitOffset % CharWidth; 2937 } 2938 2939 const uint64_t Used = (uint64_t(1) << CharWidth) - 1; 2940 if (BitOffset + BitWidth >= CharWidth) { 2941 Bits[Pos++] |= (Used << BitOffset) & Used; 2942 BitWidth -= CharWidth - BitOffset; 2943 BitOffset = 0; 2944 } 2945 2946 while (BitWidth >= CharWidth) { 2947 Bits[Pos++] = Used; 2948 BitWidth -= CharWidth; 2949 } 2950 2951 if (BitWidth > 0) 2952 Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset; 2953 } 2954 2955 // Set the bits corresponding to a field having width `BitWidth` and located at 2956 // offset `BitOffset` (from the least significant bit) within a storage unit of 2957 // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of 2958 // `Bits` corresponds to one target byte. Use target endian layout. 2959 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset, 2960 int StorageSize, int BitOffset, int BitWidth, 2961 int CharWidth, bool BigEndian) { 2962 2963 SmallVector<uint64_t, 8> TmpBits(StorageSize); 2964 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth); 2965 2966 if (BigEndian) 2967 std::reverse(TmpBits.begin(), TmpBits.end()); 2968 2969 for (uint64_t V : TmpBits) 2970 Bits[StorageOffset++] |= V; 2971 } 2972 2973 static void setUsedBits(CodeGenModule &, QualType, int, 2974 SmallVectorImpl<uint64_t> &); 2975 2976 // Set the bits in `Bits`, which correspond to the value representations of 2977 // the actual members of the record type `RTy`.
Note that this function does 2978 // not handle base classes, virtual tables, etc., since they cannot happen in 2979 // CMSE function arguments or return values. The bit mask corresponds to the target 2980 // memory layout, i.e. it's endian dependent. 2981 static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset, 2982 SmallVectorImpl<uint64_t> &Bits) { 2983 ASTContext &Context = CGM.getContext(); 2984 int CharWidth = Context.getCharWidth(); 2985 const RecordDecl *RD = RTy->getDecl()->getDefinition(); 2986 const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD); 2987 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD); 2988 2989 int Idx = 0; 2990 for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) { 2991 const FieldDecl *F = *I; 2992 2993 if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) || 2994 F->getType()->isIncompleteArrayType()) 2995 continue; 2996 2997 if (F->isBitField()) { 2998 const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F); 2999 setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(), 3000 BFI.StorageSize / CharWidth, BFI.Offset, 3001 BFI.Size, CharWidth, 3002 CGM.getDataLayout().isBigEndian()); 3003 continue; 3004 } 3005 3006 setUsedBits(CGM, F->getType(), 3007 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits); 3008 } 3009 } 3010 3011 // Set the bits in `Bits`, which correspond to the value representations of 3012 // the elements of an array type `ATy`. 3013 static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy, 3014 int Offset, SmallVectorImpl<uint64_t> &Bits) { 3015 const ASTContext &Context = CGM.getContext(); 3016 3017 QualType ETy = Context.getBaseElementType(ATy); 3018 int Size = Context.getTypeSizeInChars(ETy).getQuantity(); 3019 SmallVector<uint64_t, 4> TmpBits(Size); 3020 setUsedBits(CGM, ETy, 0, TmpBits); 3021 3022 for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) { 3023 auto Src = TmpBits.begin(); 3024 auto Dst = Bits.begin() + Offset + I * Size; 3025 for (int J = 0; J < Size; ++J) 3026 *Dst++ |= *Src++; 3027 } 3028 } 3029 3030 // Set the bits in `Bits`, which correspond to the value representations of 3031 // the type `QTy`. 3032 static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset, 3033 SmallVectorImpl<uint64_t> &Bits) { 3034 if (const auto *RTy = QTy->getAs<RecordType>()) 3035 return setUsedBits(CGM, RTy, Offset, Bits); 3036 3037 ASTContext &Context = CGM.getContext(); 3038 if (const auto *ATy = Context.getAsConstantArrayType(QTy)) 3039 return setUsedBits(CGM, ATy, Offset, Bits); 3040 3041 int Size = Context.getTypeSizeInChars(QTy).getQuantity(); 3042 if (Size <= 0) 3043 return; 3044 3045 std::fill_n(Bits.begin() + Offset, Size, 3046 (uint64_t(1) << Context.getCharWidth()) - 1); 3047 } 3048 3049 static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits, 3050 int Pos, int Size, int CharWidth, 3051 bool BigEndian) { 3052 assert(Size > 0); 3053 uint64_t Mask = 0; 3054 if (BigEndian) { 3055 for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E; 3056 ++P) 3057 Mask = (Mask << CharWidth) | *P; 3058 } else { 3059 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos; 3060 do 3061 Mask = (Mask << CharWidth) | *--P; 3062 while (P != End); 3063 } 3064 return Mask; 3065 } 3066 3067 // Emit code to clear the bits in a record that aren't part of any user- 3068 // declared member, when the record is returned from a function.
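// For illustration: for struct { char c; int i; } returned in a 64-bit register on a little-endian target (byte 0 used, bytes 1-3 padding, bytes 4-7 used), the mask computed below is 0xffffffff000000ff, so only the padding bytes are cleared.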
llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
                                                  llvm::IntegerType *ITy,
                                                  QualType QTy) {
  assert(Src->getType() == ITy);
  assert(ITy->getScalarSizeInBits() <= 64);

  const llvm::DataLayout &DataLayout = CGM.getDataLayout();
  int Size = DataLayout.getTypeStoreSize(ITy);
  SmallVector<uint64_t, 4> Bits(Size);
  setUsedBits(CGM, QTy->getAs<RecordType>(), 0, Bits);

  int CharWidth = CGM.getContext().getCharWidth();
  uint64_t Mask =
      buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());

  return Builder.CreateAnd(Src, Mask, "cmse.clear");
}

// Emit code to clear the bits in a record, which aren't a part of any user
// declared member, when the record is a function argument.
llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
                                                  llvm::ArrayType *ATy,
                                                  QualType QTy) {
  const llvm::DataLayout &DataLayout = CGM.getDataLayout();
  int Size = DataLayout.getTypeStoreSize(ATy);
  SmallVector<uint64_t, 16> Bits(Size);
  setUsedBits(CGM, QTy->getAs<RecordType>(), 0, Bits);

  // Clear each element of the LLVM array.
  int CharWidth = CGM.getContext().getCharWidth();
  int CharsPerElt =
      ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
  int MaskIndex = 0;
  llvm::Value *R = llvm::UndefValue::get(ATy);
  for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
    uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
                                       DataLayout.isBigEndian());
    MaskIndex += CharsPerElt;
    llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
    llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
    R = Builder.CreateInsertValue(R, T1, I);
  }

  return R;
}

// Emit code to clear the padding bits when returning or passing as an argument
// a 16-bit floating-point value.
llvm::Value *CodeGenFunction::EmitCMSEClearFP16(llvm::Value *Src) {
  llvm::Type *RetTy = Src->getType();
  assert(RetTy->isFloatTy() ||
         (RetTy->isIntegerTy() && RetTy->getIntegerBitWidth() == 32));
  if (RetTy->isFloatTy()) {
    llvm::Value *T0 = Builder.CreateBitCast(Src, Builder.getIntNTy(32));
    llvm::Value *T1 = Builder.CreateAnd(T0, 0xffff, "cmse.clear");
    return Builder.CreateBitCast(T1, RetTy);
  }
  return Builder.CreateAnd(Src, 0xffff, "cmse.clear");
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         bool EmitRetDbgLoc,
                                         SourceLocation EndLoc) {
  if (FI.isNoReturn()) {
    // Noreturn functions don't return.
    EmitUnreachable(EndLoc);
    return;
  }

  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
    // Naked functions don't have epilogues.
    Builder.CreateUnreachable();
    return;
  }

  // Functions with no result always return void.
  if (!ReturnValue.isValid()) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::InAlloca:
    // Aggregates get evaluated directly into the destination. Sometimes we
    // need to return the sret value in a register, though.
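    // (Illustrative, not from the original source: on i686-windows-msvc, a
    // free function returning a non-trivial class by value receives its sret
    // pointer inside the inalloca argument pack; the code below reloads that
    // pointer so it can also be returned in a register.)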
    assert(hasAggregateEvaluationKind(RetTy));
    if (RetAI.getInAllocaSRet()) {
      llvm::Function::arg_iterator EI = CurFn->arg_end();
      --EI;
      llvm::Value *ArgStruct = &*EI;
      llvm::Value *SRet = Builder.CreateStructGEP(
          nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
      RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
    }
    break;

  case ABIArgInfo::Indirect: {
    auto AI = CurFn->arg_begin();
    if (RetAI.isSRetAfterThis())
      ++AI;
    switch (getEvaluationKind(RetTy)) {
    case TEK_Complex: {
      ComplexPairTy RT =
          EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
      EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
                         /*isInit*/ true);
      break;
    }
    case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
      break;
    case TEK_Scalar:
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
                        MakeNaturalAlignAddrLValue(&*AI, RetTy),
                        /*isInit*/ true);
      break;
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp will always have
      // pointer-to-return-type type; just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI =
              findDominatingStoreToReturnValue(*this)) {
        // Reuse the debug location from the store unless there is
        // cleanup code to be emitted between the store and return
        // instruction.
        if (EmitRetDbgLoc && !AutoreleaseResult)
          RetDbgLoc = SI->getDebugLoc();
        // Get the stored value and nuke the now-dead store.
        RV = SI->getValueOperand();
        SI->eraseFromParent();

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      // If the value is offset in memory, apply the offset now.
      Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
#ifndef NDEBUG
      // Type::isObjCRetainableType has to be called on a QualType that hasn't
      // been stripped of its typedefs, so we cannot use RetTy here. Get the
      // original return type from the FunctionDecl, ObjCMethodDecl, or
      // BlockDecl, via CurCodeDecl or BlockInfo.
      QualType RT;

      if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
        RT = FD->getReturnType();
      else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
        RT = MD->getReturnType();
      else if (isa<BlockDecl>(CurCodeDecl))
        RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
      else
        llvm_unreachable("Unexpected function/method type");

      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RT->isObjCRetainableType());
#endif
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::CoerceAndExpand: {
    auto coercionType = RetAI.getCoerceAndExpandType();

    // Load all of the coerced elements out into results.
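    // (Illustrative, not in the original: if the coercion type were, say,
    // { i64, [6 x i8], i16 } with the [6 x i8] element being padding, only
    // the i64 and i16 elements would be loaded; padding elements are skipped
    // via isPaddingForCoerceAndExpand below.)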
3260 llvm::SmallVector<llvm::Value*, 4> results; 3261 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType); 3262 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 3263 auto coercedEltType = coercionType->getElementType(i); 3264 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType)) 3265 continue; 3266 3267 auto eltAddr = Builder.CreateStructGEP(addr, i); 3268 auto elt = Builder.CreateLoad(eltAddr); 3269 results.push_back(elt); 3270 } 3271 3272 // If we have one result, it's the single direct result type. 3273 if (results.size() == 1) { 3274 RV = results[0]; 3275 3276 // Otherwise, we need to make a first-class aggregate. 3277 } else { 3278 // Construct a return type that lacks padding elements. 3279 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType(); 3280 3281 RV = llvm::UndefValue::get(returnType); 3282 for (unsigned i = 0, e = results.size(); i != e; ++i) { 3283 RV = Builder.CreateInsertValue(RV, results[i], i); 3284 } 3285 } 3286 break; 3287 } 3288 3289 case ABIArgInfo::Expand: 3290 llvm_unreachable("Invalid ABI kind for return argument"); 3291 } 3292 3293 llvm::Instruction *Ret; 3294 if (RV) { 3295 if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) { 3296 // For certain return types, clear padding bits, as they may reveal 3297 // sensitive information. 3298 const Type *RTy = RetTy.getCanonicalType().getTypePtr(); 3299 if (RTy->isFloat16Type() || RTy->isHalfType()) { 3300 // 16-bit floating-point types are passed in a 32-bit integer or float, 3301 // with unspecified upper bits. 3302 RV = EmitCMSEClearFP16(RV); 3303 } else { 3304 // Small struct/union types are passed as integers. 3305 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType()); 3306 if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType())) 3307 RV = EmitCMSEClearRecord(RV, ITy, RetTy); 3308 } 3309 } 3310 EmitReturnValueCheck(RV); 3311 Ret = Builder.CreateRet(RV); 3312 } else { 3313 Ret = Builder.CreateRetVoid(); 3314 } 3315 3316 if (RetDbgLoc) 3317 Ret->setDebugLoc(std::move(RetDbgLoc)); 3318 } 3319 3320 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) { 3321 // A current decl may not be available when emitting vtable thunks. 3322 if (!CurCodeDecl) 3323 return; 3324 3325 // If the return block isn't reachable, neither is this check, so don't emit 3326 // it. 3327 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) 3328 return; 3329 3330 ReturnsNonNullAttr *RetNNAttr = nullptr; 3331 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) 3332 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>(); 3333 3334 if (!RetNNAttr && !requiresReturnValueNullabilityCheck()) 3335 return; 3336 3337 // Prefer the returns_nonnull attribute if it's present. 
3338 SourceLocation AttrLoc; 3339 SanitizerMask CheckKind; 3340 SanitizerHandler Handler; 3341 if (RetNNAttr) { 3342 assert(!requiresReturnValueNullabilityCheck() && 3343 "Cannot check nullability and the nonnull attribute"); 3344 AttrLoc = RetNNAttr->getLocation(); 3345 CheckKind = SanitizerKind::ReturnsNonnullAttribute; 3346 Handler = SanitizerHandler::NonnullReturn; 3347 } else { 3348 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl)) 3349 if (auto *TSI = DD->getTypeSourceInfo()) 3350 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>()) 3351 AttrLoc = FTL.getReturnLoc().findNullabilityLoc(); 3352 CheckKind = SanitizerKind::NullabilityReturn; 3353 Handler = SanitizerHandler::NullabilityReturn; 3354 } 3355 3356 SanitizerScope SanScope(this); 3357 3358 // Make sure the "return" source location is valid. If we're checking a 3359 // nullability annotation, make sure the preconditions for the check are met. 3360 llvm::BasicBlock *Check = createBasicBlock("nullcheck"); 3361 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck"); 3362 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load"); 3363 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr); 3364 if (requiresReturnValueNullabilityCheck()) 3365 CanNullCheck = 3366 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition); 3367 Builder.CreateCondBr(CanNullCheck, Check, NoCheck); 3368 EmitBlock(Check); 3369 3370 // Now do the null check. 3371 llvm::Value *Cond = Builder.CreateIsNotNull(RV); 3372 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)}; 3373 llvm::Value *DynamicData[] = {SLocPtr}; 3374 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData); 3375 3376 EmitBlock(NoCheck); 3377 3378 #ifndef NDEBUG 3379 // The return location should not be used after the check has been emitted. 3380 ReturnLocation = Address::invalid(); 3381 #endif 3382 } 3383 3384 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 3385 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 3386 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 3387 } 3388 3389 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, 3390 QualType Ty) { 3391 // FIXME: Generate IR in one pass, rather than going back and fixing up these 3392 // placeholders. 3393 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 3394 llvm::Type *IRPtrTy = IRTy->getPointerTo(); 3395 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo()); 3396 3397 // FIXME: When we generate this IR in one pass, we shouldn't need 3398 // this win32-specific alignment hack. 3399 CharUnits Align = CharUnits::fromQuantity(4); 3400 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align); 3401 3402 return AggValueSlot::forAddr(Address(Placeholder, Align), 3403 Ty.getQualifiers(), 3404 AggValueSlot::IsNotDestructed, 3405 AggValueSlot::DoesNotNeedGCBarriers, 3406 AggValueSlot::IsNotAliased, 3407 AggValueSlot::DoesNotOverlap); 3408 } 3409 3410 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 3411 const VarDecl *param, 3412 SourceLocation loc) { 3413 // StartFunction converted the ABI-lowered parameter(s) into a 3414 // local alloca. We need to turn that into an r-value suitable 3415 // for EmitCall. 
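  // (Illustrative, not from the original source: a delegating constructor
  // such as `Widget::Widget() : Widget(42) {}` -- Widget being a made-up
  // class -- forwards its already-lowered parameters to the target
  // constructor through this path instead of re-evaluating the original
  // argument expressions.)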
3416 Address local = GetAddrOfLocalVar(param); 3417 3418 QualType type = param->getType(); 3419 3420 if (isInAllocaArgument(CGM.getCXXABI(), type)) { 3421 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter"); 3422 } 3423 3424 // GetAddrOfLocalVar returns a pointer-to-pointer for references, 3425 // but the argument needs to be the original pointer. 3426 if (type->isReferenceType()) { 3427 args.add(RValue::get(Builder.CreateLoad(local)), type); 3428 3429 // In ARC, move out of consumed arguments so that the release cleanup 3430 // entered by StartFunction doesn't cause an over-release. This isn't 3431 // optimal -O0 code generation, but it should get cleaned up when 3432 // optimization is enabled. This also assumes that delegate calls are 3433 // performed exactly once for a set of arguments, but that should be safe. 3434 } else if (getLangOpts().ObjCAutoRefCount && 3435 param->hasAttr<NSConsumedAttr>() && 3436 type->isObjCRetainableType()) { 3437 llvm::Value *ptr = Builder.CreateLoad(local); 3438 auto null = 3439 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType())); 3440 Builder.CreateStore(null, local); 3441 args.add(RValue::get(ptr), type); 3442 3443 // For the most part, we just need to load the alloca, except that 3444 // aggregate r-values are actually pointers to temporaries. 3445 } else { 3446 args.add(convertTempToRValue(local, type, loc), type); 3447 } 3448 3449 // Deactivate the cleanup for the callee-destructed param that was pushed. 3450 if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk && 3451 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() && 3452 param->needsDestruction(getContext())) { 3453 EHScopeStack::stable_iterator cleanup = 3454 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param)); 3455 assert(cleanup.isValid() && 3456 "cleanup for callee-destructed param not recorded"); 3457 // This unreachable is a temporary marker which will be removed later. 3458 llvm::Instruction *isActive = Builder.CreateUnreachable(); 3459 args.addArgCleanupDeactivation(cleanup, isActive); 3460 } 3461 } 3462 3463 static bool isProvablyNull(llvm::Value *addr) { 3464 return isa<llvm::ConstantPointerNull>(addr); 3465 } 3466 3467 /// Emit the actual writing-back of a writeback. 3468 static void emitWriteback(CodeGenFunction &CGF, 3469 const CallArgList::Writeback &writeback) { 3470 const LValue &srcLV = writeback.Source; 3471 Address srcAddr = srcLV.getAddress(CGF); 3472 assert(!isProvablyNull(srcAddr.getPointer()) && 3473 "shouldn't have writeback for provably null argument"); 3474 3475 llvm::BasicBlock *contBB = nullptr; 3476 3477 // If the argument wasn't provably non-null, we need to null check 3478 // before doing the store. 3479 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3480 CGF.CGM.getDataLayout()); 3481 if (!provablyNonNull) { 3482 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 3483 contBB = CGF.createBasicBlock("icr.done"); 3484 3485 llvm::Value *isNull = 3486 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3487 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 3488 CGF.EmitBlock(writebackBB); 3489 } 3490 3491 // Load the value to writeback. 3492 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 3493 3494 // Cast it back, in case we're writing an id to a Foo* or something. 3495 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(), 3496 "icr.writeback-cast"); 3497 3498 // Perform the writeback. 
3499 3500 // If we have a "to use" value, it's something we need to emit a use 3501 // of. This has to be carefully threaded in: if it's done after the 3502 // release it's potentially undefined behavior (and the optimizer 3503 // will ignore it), and if it happens before the retain then the 3504 // optimizer could move the release there. 3505 if (writeback.ToUse) { 3506 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 3507 3508 // Retain the new value. No need to block-copy here: the block's 3509 // being passed up the stack. 3510 value = CGF.EmitARCRetainNonBlock(value); 3511 3512 // Emit the intrinsic use here. 3513 CGF.EmitARCIntrinsicUse(writeback.ToUse); 3514 3515 // Load the old value (primitively). 3516 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 3517 3518 // Put the new value in place (primitively). 3519 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 3520 3521 // Release the old value. 3522 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 3523 3524 // Otherwise, we can just do a normal lvalue store. 3525 } else { 3526 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 3527 } 3528 3529 // Jump to the continuation block. 3530 if (!provablyNonNull) 3531 CGF.EmitBlock(contBB); 3532 } 3533 3534 static void emitWritebacks(CodeGenFunction &CGF, 3535 const CallArgList &args) { 3536 for (const auto &I : args.writebacks()) 3537 emitWriteback(CGF, I); 3538 } 3539 3540 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, 3541 const CallArgList &CallArgs) { 3542 ArrayRef<CallArgList::CallArgCleanup> Cleanups = 3543 CallArgs.getCleanupsToDeactivate(); 3544 // Iterate in reverse to increase the likelihood of popping the cleanup. 3545 for (const auto &I : llvm::reverse(Cleanups)) { 3546 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP); 3547 I.IsActiveIP->eraseFromParent(); 3548 } 3549 } 3550 3551 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { 3552 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) 3553 if (uop->getOpcode() == UO_AddrOf) 3554 return uop->getSubExpr(); 3555 return nullptr; 3556 } 3557 3558 /// Emit an argument that's being passed call-by-writeback. That is, 3559 /// we are passing the address of an __autoreleased temporary; it 3560 /// might be copy-initialized with the current value of the given 3561 /// address, but it will definitely be copied out of after the call. 3562 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, 3563 const ObjCIndirectCopyRestoreExpr *CRE) { 3564 LValue srcLV; 3565 3566 // Make an optimistic effort to emit the address as an l-value. 3567 // This can fail if the argument expression is more complicated. 3568 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { 3569 srcLV = CGF.EmitLValue(lvExpr); 3570 3571 // Otherwise, just emit it as a scalar. 3572 } else { 3573 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); 3574 3575 QualType srcAddrType = 3576 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); 3577 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); 3578 } 3579 Address srcAddr = srcLV.getAddress(CGF); 3580 3581 // The dest and src types don't necessarily match in LLVM terms 3582 // because of the crazy ObjC compatibility rules. 3583 3584 llvm::PointerType *destType = 3585 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); 3586 3587 // If the address is a constant null, just pass the appropriate null. 
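  // (Illustrative, not in the original: under ARC, a call that passes a
  // literal null to an `NSError **` style out-parameter takes this early
  // exit and forwards a null of the parameter's IR type, with no temporary
  // and no writeback.)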
3588 if (isProvablyNull(srcAddr.getPointer())) { 3589 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), 3590 CRE->getType()); 3591 return; 3592 } 3593 3594 // Create the temporary. 3595 Address temp = CGF.CreateTempAlloca(destType->getElementType(), 3596 CGF.getPointerAlign(), 3597 "icr.temp"); 3598 // Loading an l-value can introduce a cleanup if the l-value is __weak, 3599 // and that cleanup will be conditional if we can't prove that the l-value 3600 // isn't null, so we need to register a dominating point so that the cleanups 3601 // system will make valid IR. 3602 CodeGenFunction::ConditionalEvaluation condEval(CGF); 3603 3604 // Zero-initialize it if we're not doing a copy-initialization. 3605 bool shouldCopy = CRE->shouldCopy(); 3606 if (!shouldCopy) { 3607 llvm::Value *null = 3608 llvm::ConstantPointerNull::get( 3609 cast<llvm::PointerType>(destType->getElementType())); 3610 CGF.Builder.CreateStore(null, temp); 3611 } 3612 3613 llvm::BasicBlock *contBB = nullptr; 3614 llvm::BasicBlock *originBB = nullptr; 3615 3616 // If the address is *not* known to be non-null, we need to switch. 3617 llvm::Value *finalArgument; 3618 3619 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3620 CGF.CGM.getDataLayout()); 3621 if (provablyNonNull) { 3622 finalArgument = temp.getPointer(); 3623 } else { 3624 llvm::Value *isNull = 3625 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3626 3627 finalArgument = CGF.Builder.CreateSelect(isNull, 3628 llvm::ConstantPointerNull::get(destType), 3629 temp.getPointer(), "icr.argument"); 3630 3631 // If we need to copy, then the load has to be conditional, which 3632 // means we need control flow. 3633 if (shouldCopy) { 3634 originBB = CGF.Builder.GetInsertBlock(); 3635 contBB = CGF.createBasicBlock("icr.cont"); 3636 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 3637 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 3638 CGF.EmitBlock(copyBB); 3639 condEval.begin(CGF); 3640 } 3641 } 3642 3643 llvm::Value *valueToUse = nullptr; 3644 3645 // Perform a copy if necessary. 3646 if (shouldCopy) { 3647 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 3648 assert(srcRV.isScalar()); 3649 3650 llvm::Value *src = srcRV.getScalarVal(); 3651 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 3652 "icr.cast"); 3653 3654 // Use an ordinary store, not a store-to-lvalue. 3655 CGF.Builder.CreateStore(src, temp); 3656 3657 // If optimization is enabled, and the value was held in a 3658 // __strong variable, we need to tell the optimizer that this 3659 // value has to stay alive until we're doing the store back. 3660 // This is because the temporary is effectively unretained, 3661 // and so otherwise we can violate the high-level semantics. 3662 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 3663 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 3664 valueToUse = src; 3665 } 3666 } 3667 3668 // Finish the control flow if we needed it. 3669 if (shouldCopy && !provablyNonNull) { 3670 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 3671 CGF.EmitBlock(contBB); 3672 3673 // Make a phi for the value to intrinsically use. 
3674 if (valueToUse) { 3675 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 3676 "icr.to-use"); 3677 phiToUse->addIncoming(valueToUse, copyBB); 3678 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 3679 originBB); 3680 valueToUse = phiToUse; 3681 } 3682 3683 condEval.end(CGF); 3684 } 3685 3686 args.addWriteback(srcLV, temp, valueToUse); 3687 args.add(RValue::get(finalArgument), CRE->getType()); 3688 } 3689 3690 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 3691 assert(!StackBase); 3692 3693 // Save the stack. 3694 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 3695 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); 3696 } 3697 3698 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 3699 if (StackBase) { 3700 // Restore the stack after the call. 3701 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 3702 CGF.Builder.CreateCall(F, StackBase); 3703 } 3704 } 3705 3706 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, 3707 SourceLocation ArgLoc, 3708 AbstractCallee AC, 3709 unsigned ParmNum) { 3710 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || 3711 SanOpts.has(SanitizerKind::NullabilityArg))) 3712 return; 3713 3714 // The param decl may be missing in a variadic function. 3715 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr; 3716 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; 3717 3718 // Prefer the nonnull attribute if it's present. 3719 const NonNullAttr *NNAttr = nullptr; 3720 if (SanOpts.has(SanitizerKind::NonnullAttribute)) 3721 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo); 3722 3723 bool CanCheckNullability = false; 3724 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) { 3725 auto Nullability = PVD->getType()->getNullability(getContext()); 3726 CanCheckNullability = Nullability && 3727 *Nullability == NullabilityKind::NonNull && 3728 PVD->getTypeSourceInfo(); 3729 } 3730 3731 if (!NNAttr && !CanCheckNullability) 3732 return; 3733 3734 SourceLocation AttrLoc; 3735 SanitizerMask CheckKind; 3736 SanitizerHandler Handler; 3737 if (NNAttr) { 3738 AttrLoc = NNAttr->getLocation(); 3739 CheckKind = SanitizerKind::NonnullAttribute; 3740 Handler = SanitizerHandler::NonnullArg; 3741 } else { 3742 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc(); 3743 CheckKind = SanitizerKind::NullabilityArg; 3744 Handler = SanitizerHandler::NullabilityArg; 3745 } 3746 3747 SanitizerScope SanScope(this); 3748 assert(RV.isScalar()); 3749 llvm::Value *V = RV.getScalarVal(); 3750 llvm::Value *Cond = 3751 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType())); 3752 llvm::Constant *StaticData[] = { 3753 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc), 3754 llvm::ConstantInt::get(Int32Ty, ArgNo + 1), 3755 }; 3756 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None); 3757 } 3758 3759 void CodeGenFunction::EmitCallArgs( 3760 CallArgList &Args, ArrayRef<QualType> ArgTypes, 3761 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, 3762 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { 3763 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); 3764 3765 // We *have* to evaluate arguments from right to left in the MS C++ ABI, 3766 // because arguments are destroyed left to right in the callee. 
  // As a special case, there are certain language constructs that require
  // left-to-right evaluation, and in those cases we consider the evaluation
  // order requirement to trump the "destruction order is reverse construction
  // order" guarantee.
  bool LeftToRight =
      CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
          ? Order == EvaluationOrder::ForceLeftToRight
          : Order != EvaluationOrder::ForceRightToLeft;

  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
                                         RValue EmittedArg) {
    if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
      return;
    auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
    if (PS == nullptr)
      return;

    const auto &Context = getContext();
    auto SizeTy = Context.getSizeType();
    auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
    assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
    llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
                                                     EmittedArg.getScalarVal(),
                                                     PS->isDynamic());
    Args.add(RValue::get(V), SizeTy);
    // If we're emitting args in reverse, be sure to do so with
    // pass_object_size, as well.
    if (!LeftToRight)
      std::swap(Args.back(), *(&Args.back() - 1));
  };

  // Insert a stack save if we're going to need any inalloca args.
  bool HasInAllocaArgs = false;
  if (CGM.getTarget().getCXXABI().isMicrosoft()) {
    for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
         I != E && !HasInAllocaArgs; ++I)
      HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
    if (HasInAllocaArgs) {
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      Args.allocateArgumentMemory(*this);
    }
  }

  // Evaluate each argument in the appropriate order.
  size_t CallArgsStart = Args.size();
  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
    unsigned Idx = LeftToRight ? I : E - I - 1;
    CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
    unsigned InitialArgSize = Args.size();
    // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types
    // of the argument and parameter match or the objc method is
    // parameterized.
    assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
            getContext().hasSameUnqualifiedType((*Arg)->getType(),
                                                ArgTypes[Idx]) ||
            (isa<ObjCMethodDecl>(AC.getDecl()) &&
             isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
           "Argument and parameter types don't match");
    EmitCallArg(Args, *Arg, ArgTypes[Idx]);
    // In particular, we depend on it being the last arg in Args, and the
    // objectsize bits depend on there only being one arg if !LeftToRight.
    assert(InitialArgSize + 1 == Args.size() &&
           "The code below depends on only adding one arg per EmitCallArg");
    (void)InitialArgSize;
    // Since pointer arguments are never emitted as LValues, it is safe to
    // emit the non-null argument check for r-values only.
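    // (Illustrative, not from the original: with -fsanitize=nullability-arg,
    // a call `g(p)` against `void g(int *_Nonnull q)` -- a made-up example
    // signature -- emits an icmp of the evaluated pointer against null that
    // feeds the sanitizer's NullabilityArg handler.)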
3831 if (!Args.back().hasLValue()) { 3832 RValue RVArg = Args.back().getKnownRValue(); 3833 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC, 3834 ParamsToSkip + Idx); 3835 // @llvm.objectsize should never have side-effects and shouldn't need 3836 // destruction/cleanups, so we can safely "emit" it after its arg, 3837 // regardless of right-to-leftness 3838 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg); 3839 } 3840 } 3841 3842 if (!LeftToRight) { 3843 // Un-reverse the arguments we just evaluated so they match up with the LLVM 3844 // IR function. 3845 std::reverse(Args.begin() + CallArgsStart, Args.end()); 3846 } 3847 } 3848 3849 namespace { 3850 3851 struct DestroyUnpassedArg final : EHScopeStack::Cleanup { 3852 DestroyUnpassedArg(Address Addr, QualType Ty) 3853 : Addr(Addr), Ty(Ty) {} 3854 3855 Address Addr; 3856 QualType Ty; 3857 3858 void Emit(CodeGenFunction &CGF, Flags flags) override { 3859 QualType::DestructionKind DtorKind = Ty.isDestructedType(); 3860 if (DtorKind == QualType::DK_cxx_destructor) { 3861 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 3862 assert(!Dtor->isTrivial()); 3863 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 3864 /*Delegating=*/false, Addr, Ty); 3865 } else { 3866 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty)); 3867 } 3868 } 3869 }; 3870 3871 struct DisableDebugLocationUpdates { 3872 CodeGenFunction &CGF; 3873 bool disabledDebugInfo; 3874 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { 3875 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) 3876 CGF.disableDebugInfo(); 3877 } 3878 ~DisableDebugLocationUpdates() { 3879 if (disabledDebugInfo) 3880 CGF.enableDebugInfo(); 3881 } 3882 }; 3883 3884 } // end anonymous namespace 3885 3886 RValue CallArg::getRValue(CodeGenFunction &CGF) const { 3887 if (!HasLV) 3888 return RV; 3889 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty); 3890 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, 3891 LV.isVolatile()); 3892 IsUsed = true; 3893 return RValue::getAggregate(Copy.getAddress(CGF)); 3894 } 3895 3896 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const { 3897 LValue Dst = CGF.MakeAddrLValue(Addr, Ty); 3898 if (!HasLV && RV.isScalar()) 3899 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true); 3900 else if (!HasLV && RV.isComplex()) 3901 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true); 3902 else { 3903 auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress(); 3904 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty); 3905 // We assume that call args are never copied into subobjects. 3906 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap, 3907 HasLV ? 
LV.isVolatileQualified() 3908 : RV.isVolatileQualified()); 3909 } 3910 IsUsed = true; 3911 } 3912 3913 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 3914 QualType type) { 3915 DisableDebugLocationUpdates Dis(*this, E); 3916 if (const ObjCIndirectCopyRestoreExpr *CRE 3917 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 3918 assert(getLangOpts().ObjCAutoRefCount); 3919 return emitWritebackArg(*this, args, CRE); 3920 } 3921 3922 assert(type->isReferenceType() == E->isGLValue() && 3923 "reference binding to unmaterialized r-value!"); 3924 3925 if (E->isGLValue()) { 3926 assert(E->getObjectKind() == OK_Ordinary); 3927 return args.add(EmitReferenceBindingToExpr(E), type); 3928 } 3929 3930 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 3931 3932 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 3933 // However, we still have to push an EH-only cleanup in case we unwind before 3934 // we make it to the call. 3935 if (HasAggregateEvalKind && 3936 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) { 3937 // If we're using inalloca, use the argument memory. Otherwise, use a 3938 // temporary. 3939 AggValueSlot Slot; 3940 if (args.isUsingInAlloca()) 3941 Slot = createPlaceholderSlot(*this, type); 3942 else 3943 Slot = CreateAggTemp(type, "agg.tmp"); 3944 3945 bool DestroyedInCallee = true, NeedsEHCleanup = true; 3946 if (const auto *RD = type->getAsCXXRecordDecl()) 3947 DestroyedInCallee = RD->hasNonTrivialDestructor(); 3948 else 3949 NeedsEHCleanup = needsEHCleanup(type.isDestructedType()); 3950 3951 if (DestroyedInCallee) 3952 Slot.setExternallyDestructed(); 3953 3954 EmitAggExpr(E, Slot); 3955 RValue RV = Slot.asRValue(); 3956 args.add(RV, type); 3957 3958 if (DestroyedInCallee && NeedsEHCleanup) { 3959 // Create a no-op GEP between the placeholder and the cleanup so we can 3960 // RAUW it successfully. It also serves as a marker of the first 3961 // instruction where the cleanup is active. 3962 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(), 3963 type); 3964 // This unreachable is a temporary marker which will be removed later. 3965 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 3966 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 3967 } 3968 return; 3969 } 3970 3971 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 3972 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 3973 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 3974 assert(L.isSimple()); 3975 args.addUncopiedAggregate(L, type); 3976 return; 3977 } 3978 3979 args.add(EmitAnyExprToTemp(E), type); 3980 } 3981 3982 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 3983 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 3984 // implicitly widens null pointer constants that are arguments to varargs 3985 // functions to pointer-sized ints. 3986 if (!getTarget().getTriple().isOSWindows()) 3987 return Arg->getType(); 3988 3989 if (Arg->getType()->isIntegerType() && 3990 getContext().getTypeSize(Arg->getType()) < 3991 getContext().getTargetInfo().getPointerWidth(0) && 3992 Arg->isNullPointerConstant(getContext(), 3993 Expr::NPC_ValueDependentIsNotNull)) { 3994 return getContext().getIntPtrType(); 3995 } 3996 3997 return Arg->getType(); 3998 } 3999 4000 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 4001 // optimizer it can aggressively ignore unwind edges. 
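//
// (Sketch of the effect, not from the original source: at -O1 and above
// without -fobjc-arc-exceptions, call sites end up with IR along the lines
// of `call void @objc_release(i8* %x), !clang.arc.no_objc_arc_exceptions !0`,
// which licenses the ARC optimizer to pair retains and releases across
// potential unwind edges.)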
4002 void 4003 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 4004 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 4005 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 4006 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 4007 CGM.getNoObjCARCExceptionsMetadata()); 4008 } 4009 4010 /// Emits a call to the given no-arguments nounwind runtime function. 4011 llvm::CallInst * 4012 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, 4013 const llvm::Twine &name) { 4014 return EmitNounwindRuntimeCall(callee, None, name); 4015 } 4016 4017 /// Emits a call to the given nounwind runtime function. 4018 llvm::CallInst * 4019 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, 4020 ArrayRef<llvm::Value *> args, 4021 const llvm::Twine &name) { 4022 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 4023 call->setDoesNotThrow(); 4024 return call; 4025 } 4026 4027 /// Emits a simple call (never an invoke) to the given no-arguments 4028 /// runtime function. 4029 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, 4030 const llvm::Twine &name) { 4031 return EmitRuntimeCall(callee, None, name); 4032 } 4033 4034 // Calls which may throw must have operand bundles indicating which funclet 4035 // they are nested within. 4036 SmallVector<llvm::OperandBundleDef, 1> 4037 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) { 4038 SmallVector<llvm::OperandBundleDef, 1> BundleList; 4039 // There is no need for a funclet operand bundle if we aren't inside a 4040 // funclet. 4041 if (!CurrentFuncletPad) 4042 return BundleList; 4043 4044 // Skip intrinsics which cannot throw. 4045 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts()); 4046 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) 4047 return BundleList; 4048 4049 BundleList.emplace_back("funclet", CurrentFuncletPad); 4050 return BundleList; 4051 } 4052 4053 /// Emits a simple call (never an invoke) to the given runtime function. 4054 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, 4055 ArrayRef<llvm::Value *> args, 4056 const llvm::Twine &name) { 4057 llvm::CallInst *call = Builder.CreateCall( 4058 callee, args, getBundlesForFunclet(callee.getCallee()), name); 4059 call->setCallingConv(getRuntimeCC()); 4060 return call; 4061 } 4062 4063 /// Emits a call or invoke to the given noreturn runtime function. 4064 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke( 4065 llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) { 4066 SmallVector<llvm::OperandBundleDef, 1> BundleList = 4067 getBundlesForFunclet(callee.getCallee()); 4068 4069 if (getInvokeDest()) { 4070 llvm::InvokeInst *invoke = 4071 Builder.CreateInvoke(callee, 4072 getUnreachableBlock(), 4073 getInvokeDest(), 4074 args, 4075 BundleList); 4076 invoke->setDoesNotReturn(); 4077 invoke->setCallingConv(getRuntimeCC()); 4078 } else { 4079 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); 4080 call->setDoesNotReturn(); 4081 call->setCallingConv(getRuntimeCC()); 4082 Builder.CreateUnreachable(); 4083 } 4084 } 4085 4086 /// Emits a call or invoke instruction to the given nullary runtime function. 4087 llvm::CallBase * 4088 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, 4089 const Twine &name) { 4090 return EmitRuntimeCallOrInvoke(callee, None, name); 4091 } 4092 4093 /// Emits a call or invoke instruction to the given runtime function. 
4094 llvm::CallBase * 4095 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, 4096 ArrayRef<llvm::Value *> args, 4097 const Twine &name) { 4098 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name); 4099 call->setCallingConv(getRuntimeCC()); 4100 return call; 4101 } 4102 4103 /// Emits a call or invoke instruction to the given function, depending 4104 /// on the current state of the EH stack. 4105 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee, 4106 ArrayRef<llvm::Value *> Args, 4107 const Twine &Name) { 4108 llvm::BasicBlock *InvokeDest = getInvokeDest(); 4109 SmallVector<llvm::OperandBundleDef, 1> BundleList = 4110 getBundlesForFunclet(Callee.getCallee()); 4111 4112 llvm::CallBase *Inst; 4113 if (!InvokeDest) 4114 Inst = Builder.CreateCall(Callee, Args, BundleList, Name); 4115 else { 4116 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 4117 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList, 4118 Name); 4119 EmitBlock(ContBB); 4120 } 4121 4122 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 4123 // optimizer it can aggressively ignore unwind edges. 4124 if (CGM.getLangOpts().ObjCAutoRefCount) 4125 AddObjCARCExceptionMetadata(Inst); 4126 4127 return Inst; 4128 } 4129 4130 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, 4131 llvm::Value *New) { 4132 DeferredReplacements.push_back(std::make_pair(Old, New)); 4133 } 4134 4135 namespace { 4136 4137 /// Specify given \p NewAlign as the alignment of return value attribute. If 4138 /// such attribute already exists, re-set it to the maximal one of two options. 4139 LLVM_NODISCARD llvm::AttributeList 4140 maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx, 4141 const llvm::AttributeList &Attrs, 4142 llvm::Align NewAlign) { 4143 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne(); 4144 if (CurAlign >= NewAlign) 4145 return Attrs; 4146 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign); 4147 return Attrs 4148 .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex, 4149 llvm::Attribute::AttrKind::Alignment) 4150 .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr); 4151 } 4152 4153 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter { 4154 protected: 4155 CodeGenFunction &CGF; 4156 4157 /// We do nothing if this is, or becomes, nullptr. 4158 const AlignedAttrTy *AA = nullptr; 4159 4160 llvm::Value *Alignment = nullptr; // May or may not be a constant. 4161 llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero. 4162 4163 AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) 4164 : CGF(CGF_) { 4165 if (!FuncDecl) 4166 return; 4167 AA = FuncDecl->getAttr<AlignedAttrTy>(); 4168 } 4169 4170 public: 4171 /// If we can, materialize the alignment as an attribute on return value. 4172 LLVM_NODISCARD llvm::AttributeList 4173 TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) { 4174 if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment)) 4175 return Attrs; 4176 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment); 4177 if (!AlignmentCI) 4178 return Attrs; 4179 // We may legitimately have non-power-of-2 alignment here. 4180 // If so, this is UB land, emit it via `@llvm.assume` instead. 
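    // (Illustrative, not in the original: if the attribute's alignment
    // operand evaluated to, say, 3, it could not become an `align` return
    // attribute -- LLVM requires those to be powers of two -- so we bail out
    // here and leave it to the @llvm.assume fallback path.)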
4181 if (!AlignmentCI->getValue().isPowerOf2()) 4182 return Attrs; 4183 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute( 4184 CGF.getLLVMContext(), Attrs, 4185 llvm::Align( 4186 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment))); 4187 AA = nullptr; // We're done. Disallow doing anything else. 4188 return NewAttrs; 4189 } 4190 4191 /// Emit alignment assumption. 4192 /// This is a general fallback that we take if either there is an offset, 4193 /// or the alignment is variable or we are sanitizing for alignment. 4194 void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) { 4195 if (!AA) 4196 return; 4197 CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, 4198 AA->getLocation(), Alignment, OffsetCI); 4199 AA = nullptr; // We're done. Disallow doing anything else. 4200 } 4201 }; 4202 4203 /// Helper data structure to emit `AssumeAlignedAttr`. 4204 class AssumeAlignedAttrEmitter final 4205 : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> { 4206 public: 4207 AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) 4208 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { 4209 if (!AA) 4210 return; 4211 // It is guaranteed that the alignment/offset are constants. 4212 Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment())); 4213 if (Expr *Offset = AA->getOffset()) { 4214 OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset)); 4215 if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset. 4216 OffsetCI = nullptr; 4217 } 4218 } 4219 }; 4220 4221 /// Helper data structure to emit `AllocAlignAttr`. 4222 class AllocAlignAttrEmitter final 4223 : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> { 4224 public: 4225 AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl, 4226 const CallArgList &CallArgs) 4227 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { 4228 if (!AA) 4229 return; 4230 // Alignment may or may not be a constant, and that is okay. 4231 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()] 4232 .getRValue(CGF) 4233 .getScalarVal(); 4234 } 4235 }; 4236 4237 } // namespace 4238 4239 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 4240 const CGCallee &Callee, 4241 ReturnValueSlot ReturnValue, 4242 const CallArgList &CallArgs, 4243 llvm::CallBase **callOrInvoke, 4244 SourceLocation Loc) { 4245 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 4246 4247 assert(Callee.isOrdinary() || Callee.isVirtual()); 4248 4249 // Handle struct-return functions by passing a pointer to the 4250 // location that we would like to return into. 4251 QualType RetTy = CallInfo.getReturnType(); 4252 const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); 4253 4254 llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo); 4255 4256 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); 4257 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) 4258 // We can only guarantee that a function is called from the correct 4259 // context/function based on the appropriate target attributes, 4260 // so only check in the case where we have both always_inline and target 4261 // since otherwise we could be making a conditional call after a check for 4262 // the proper cpu features (and it won't cause code generation issues due to 4263 // function based code generation). 
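    // (Illustrative, not from the original source: calling an always_inline
    // function declared with __attribute__((target("avx2"))) from a caller
    // compiled without AVX2 is diagnosed by checkTargetFeatures, since the
    // mandatory inlining could not be guarded by a runtime CPU check.)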
    if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
        TargetDecl->hasAttr<TargetAttr>())
      checkTargetFeatures(Loc, FD);

#ifndef NDEBUG
  if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
    // For an inalloca varargs function, we don't expect CallInfo to match the
    // function pointer's type, because the inalloca struct will have extra
    // fields in it for the varargs parameters. Code later in this function
    // bitcasts the function pointer to the type derived from CallInfo.
    //
    // In other cases, we assert that the types match up (until pointers stop
    // having pointee types).
    llvm::Type *TypeFromVal;
    if (Callee.isVirtual())
      TypeFromVal = Callee.getVirtualFunctionType();
    else
      TypeFromVal =
          Callee.getFunctionPointer()->getType()->getPointerElementType();
    assert(IRFuncTy == TypeFromVal);
  }
#endif

  // 1. Set up the arguments.

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
  Address ArgMemory = Address::invalid();
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    const llvm::DataLayout &DL = CGM.getDataLayout();
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
                                "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    auto Align = CallInfo.getArgStructAlignment();
    AI->setAlignment(Align.getAsAlign());
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = Address(AI, Align);
  }

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  Address SRetPtr = Address::invalid();
  Address SRetAlloca = Address::invalid();
  llvm::Value *UnusedReturnSizePtr = nullptr;
  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
    if (!ReturnValue.isNull()) {
      SRetPtr = ReturnValue.getValue();
    } else {
      SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
      if (HaveInsertPoint() && ReturnValue.isUnused()) {
        uint64_t size =
            CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
        UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
      }
    }
    if (IRFunctionArgs.hasSRetArg()) {
      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
    } else if (RetAI.isInAlloca()) {
      Address Addr =
          Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr.getPointer(), Addr);
    }
  }

  Address swiftErrorTemp = Address::invalid();
  Address swiftErrorArg = Address::invalid();

  // When passing arguments using temporary allocas, we need to add the
  // appropriate lifetime markers. This vector keeps track of all the lifetime
  // markers that need to be ended right after the call.
  SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;

  // Translate all of the arguments as necessary to match the IR lowering.
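  // (Overview sketch, not in the original: each Clang-level argument expands
  // to zero or more IR arguments -- Ignore contributes none, Direct usually
  // one, Expand and flattened aggregates several -- and IRFunctionArgs holds
  // the mapping from Clang argument numbers to IR argument slots.)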
  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (I->isAggregate()) {
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(Addr.getPointer());

        if (!ArgInfo.getInAllocaIndirect()) {
          // Replace the placeholder with the appropriate argument slot GEP.
          CGBuilderTy::InsertPoint IP = Builder.saveIP();
          Builder.SetInsertPoint(Placeholder);
          Addr = Builder.CreateStructGEP(ArgMemory,
                                         ArgInfo.getInAllocaFieldIndex());
          Builder.restoreIP(IP);
        } else {
          // For indirect things such as overaligned structs, replace the
          // placeholder with a regular aggregate temporary alloca. Store the
          // address of this alloca into the struct.
          Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
          Address ArgSlot = Builder.CreateStructGEP(
              ArgMemory, ArgInfo.getInAllocaFieldIndex());
          Builder.CreateStore(Addr.getPointer(), ArgSlot);
        }
        deferPlaceholderReplacement(Placeholder, Addr.getPointer());
      } else if (ArgInfo.getInAllocaIndirect()) {
        // Make a temporary alloca and store the address of it into the
        // argument struct.
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, getContext().getTypeAlignInChars(I->Ty),
            "indirect-arg-temp");
        I->copyInto(*this, Addr);
        Address ArgSlot =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        Builder.CreateStore(Addr.getPointer(), ArgSlot);
      } else {
        // Store the RValue into the argument struct.
        Address Addr =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr.getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable. The
        // definition of a type later in a translation unit may change its
        // type from {}* to (%struct.foo*)*.
        if (Addr.getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        I->copyInto(*this, Addr);
      }
      break;
    }

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      if (!I->isAggregate()) {
        // Make a temporary alloca to pass the argument.
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
        IRCallArgs[FirstIRArg] = Addr.getPointer();

        I->copyInto(*this, Addr);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source. (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is not located in default
        //    or alloca address space.
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Value *V = Addr.getPointer();
        CharUnits Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();

        assert((FirstIRArg >= IRFuncTy->getNumParams() ||
                IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
                    TD->getAllocaAddrSpace()) &&
               "indirect argument must be in alloca address space");

        bool NeedCopy = false;

        if (Addr.getAlignment() < Align &&
            llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
                Align.getAsAlign()) {
          NeedCopy = true;
        } else if (I->hasLValue()) {
          auto LV = I->getKnownLValue();
          auto AS = LV.getAddressSpace();

          if (!ArgInfo.getIndirectByVal() ||
              (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
            NeedCopy = true;
          }
          if (!getLangOpts().OpenCL) {
            if ((ArgInfo.getIndirectByVal() &&
                 (AS != LangAS::Default &&
                  AS != CGM.getASTAllocaAddressSpace()))) {
              NeedCopy = true;
            }
          }
          // For OpenCL even if RV is located in default or alloca address
          // space we don't want to perform address space cast for it.
          else if ((ArgInfo.getIndirectByVal() &&
                    Addr.getType()->getAddressSpace() !=
                        IRFuncTy->getParamType(FirstIRArg)
                            ->getPointerAddressSpace())) {
            NeedCopy = true;
          }
        }

        if (NeedCopy) {
          // Create an aligned temporary, and copy to it.
          Address AI = CreateMemTempWithoutCast(
              I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
          IRCallArgs[FirstIRArg] = AI.getPointer();

          // Emit lifetime markers for the temporary alloca.
          uint64_t ByvalTempElementSize =
              CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
          llvm::Value *LifetimeSize =
              EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());

          // Add cleanup code to emit the end lifetime marker after the call.
          if (LifetimeSize) // In case we disabled lifetime markers.
            CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);

          // Generate the copy.
          I->copyInto(*this, AI);
        } else {
          // Skip the extra memcpy call.
          auto *T = V->getType()->getPointerElementType()->getPointerTo(
              CGM.getDataLayout().getAllocaAddrSpace());
          IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
              *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
              true);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (!I->isAggregate())
          V = I->getKnownRValue().getScalarVal();
        else
          V = Builder.CreateLoad(
              I->hasLValue() ?
I->getKnownLValue().getAddress(*this) 4523 : I->getKnownRValue().getAggregateAddress()); 4524 4525 // Implement swifterror by copying into a new swifterror argument. 4526 // We'll write back in the normal path out of the call. 4527 if (CallInfo.getExtParameterInfo(ArgNo).getABI() 4528 == ParameterABI::SwiftErrorResult) { 4529 assert(!swiftErrorTemp.isValid() && "multiple swifterror args"); 4530 4531 QualType pointeeTy = I->Ty->getPointeeType(); 4532 swiftErrorArg = 4533 Address(V, getContext().getTypeAlignInChars(pointeeTy)); 4534 4535 swiftErrorTemp = 4536 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 4537 V = swiftErrorTemp.getPointer(); 4538 cast<llvm::AllocaInst>(V)->setSwiftError(true); 4539 4540 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg); 4541 Builder.CreateStore(errorValue, swiftErrorTemp); 4542 } 4543 4544 // We might have to widen integers, but we should never truncate. 4545 if (ArgInfo.getCoerceToType() != V->getType() && 4546 V->getType()->isIntegerTy()) 4547 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType()); 4548 4549 // If the argument doesn't match, perform a bitcast to coerce it. This 4550 // can happen due to trivial type mismatches. 4551 if (FirstIRArg < IRFuncTy->getNumParams() && 4552 V->getType() != IRFuncTy->getParamType(FirstIRArg)) 4553 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); 4554 4555 IRCallArgs[FirstIRArg] = V; 4556 break; 4557 } 4558 4559 // FIXME: Avoid the conversion through memory if possible. 4560 Address Src = Address::invalid(); 4561 if (!I->isAggregate()) { 4562 Src = CreateMemTemp(I->Ty, "coerce"); 4563 I->copyInto(*this, Src); 4564 } else { 4565 Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this) 4566 : I->getKnownRValue().getAggregateAddress(); 4567 } 4568 4569 // If the value is offset in memory, apply the offset now. 4570 Src = emitAddressAtOffset(*this, Src, ArgInfo); 4571 4572 // Fast-isel and the optimizer generally like scalar values better than 4573 // FCAs, so we flatten them if this is safe to do for this argument. 4574 llvm::StructType *STy = 4575 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType()); 4576 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 4577 llvm::Type *SrcTy = Src.getElementType(); 4578 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); 4579 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); 4580 4581 // If the source type is smaller than the destination type of the 4582 // coerce-to logic, copy the source value into a temp alloca the size 4583 // of the destination type to allow loading all of it. The bits past 4584 // the source value are left undef. 4585 if (SrcSize < DstSize) { 4586 Address TempAlloca 4587 = CreateTempAlloca(STy, Src.getAlignment(), 4588 Src.getName() + ".coerce"); 4589 Builder.CreateMemCpy(TempAlloca, Src, SrcSize); 4590 Src = TempAlloca; 4591 } else { 4592 Src = Builder.CreateBitCast(Src, 4593 STy->getPointerTo(Src.getAddressSpace())); 4594 } 4595 4596 assert(NumIRArgs == STy->getNumElements()); 4597 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 4598 Address EltPtr = Builder.CreateStructGEP(Src, i); 4599 llvm::Value *LI = Builder.CreateLoad(EltPtr); 4600 IRCallArgs[FirstIRArg + i] = LI; 4601 } 4602 } else { 4603 // In the simple case, just pass the coerced loaded value. 
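        // (Illustrative, not from the original: an aggregate whose coercion
        // type is a single scalar, e.g. a small struct lowered to one i64,
        // takes this path and CreateCoercedLoad produces the lone IR
        // argument.)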

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy = Src.getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          Address TempAlloca
            = CreateTempAlloca(STy, Src.getAlignment(),
                               Src.getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
          Src = TempAlloca;
        } else {
          Src = Builder.CreateBitCast(Src,
                                      STy->getPointerTo(Src.getAddressSpace()));
        }

        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Address EltPtr = Builder.CreateStructGEP(Src, i);
          llvm::Value *LI = Builder.CreateLoad(EltPtr);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        llvm::Value *Load =
            CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);

        if (CallInfo.isCmseNSCall()) {
          // For certain parameter types, clear padding bits, as they may
          // reveal sensitive information.
          const Type *PTy = I->Ty.getCanonicalType().getTypePtr();
          // 16-bit floating-point types are passed in a 32-bit integer or
          // float, with unspecified upper bits.
          if (PTy->isFloat16Type() || PTy->isHalfType()) {
            Load = EmitCMSEClearFP16(Load);
          } else {
            // Small struct/union types are passed as integer arrays.
            auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
            if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
              Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
          }
        }
        IRCallArgs[FirstIRArg] = Load;
      }

      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = ArgInfo.getCoerceAndExpandType();
      auto layout = CGM.getDataLayout().getStructLayout(coercionType);

      llvm::Value *tempSize = nullptr;
      Address addr = Address::invalid();
      Address AllocaAddr = Address::invalid();
      if (I->isAggregate()) {
        addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                              : I->getKnownRValue().getAggregateAddress();
      } else {
        RValue RV = I->getKnownRValue();
        assert(RV.isScalar()); // complex should always just be direct

        llvm::Type *scalarType = RV.getScalarVal()->getType();
        auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
        auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);

        // Materialize to a temporary.
        addr = CreateTempAlloca(
            RV.getScalarVal()->getType(),
            CharUnits::fromQuantity(std::max(
                (unsigned)layout->getAlignment().value(), scalarAlign)),
            "tmp",
            /*ArraySize=*/nullptr, &AllocaAddr);
        tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());

        Builder.CreateStore(RV.getScalarVal(), addr);
      }

      addr = Builder.CreateElementBitCast(addr, coercionType);

      unsigned IRArgPos = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
          continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = Builder.CreateLoad(eltAddr);
        IRCallArgs[IRArgPos++] = elt;
      }
      assert(IRArgPos == FirstIRArg + NumIRArgs);

      if (tempSize) {
        EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
      }

      break;
    }
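
    // (The CoerceAndExpand case above is used, e.g., for the Swift calling
    // convention: a small struct may be lowered to its individual scalar
    // elements, with padding elements of the coercion struct skipped rather
    // than passed.)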

    case ABIArgInfo::Expand:
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
  }

  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
  llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();

  // If we're using inalloca, set up that argument.
  if (ArgMemory.isValid()) {
    llvm::Value *Arg = ArgMemory.getPointer();
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site. In such
      // cases, we can't do any parameter mismatch checks. Give up and bitcast
      // the callee.
      unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
      CalleePtr =
          Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator
                 DI = DeclaredTy->element_begin(),
                 DE = DeclaredTy->element_end(), FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  // 2. Prepare the function pointer.

  // If the callee is a bitcast of a non-variadic function to have a
  // variadic function pointer type, check to see if we can remove the
  // bitcast. This comes up with unprototyped functions.
  //
  // This makes the IR nicer, but more importantly it ensures that we
  // can inline the function at -O0 if it is marked always_inline.
  auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
                                   llvm::Value *Ptr) -> llvm::Function * {
    if (!CalleeFT->isVarArg())
      return nullptr;

    // Get underlying value if it's a bitcast.
    if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
      if (CE->getOpcode() == llvm::Instruction::BitCast)
        Ptr = CE->getOperand(0);
    }

    llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
    if (!OrigFn)
      return nullptr;

    llvm::FunctionType *OrigFT = OrigFn->getFunctionType();

    // If the original type is variadic, or if any of the component types
    // disagree, we cannot remove the cast.
    if (OrigFT->isVarArg() ||
        OrigFT->getNumParams() != CalleeFT->getNumParams() ||
        OrigFT->getReturnType() != CalleeFT->getReturnType())
      return nullptr;

    for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
      if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
        return nullptr;

    return OrigFn;
  };

  if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
    CalleePtr = OrigFn;
    IRFuncTy = OrigFn->getFunctionType();
  }
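
  // Example: for an unprototyped C declaration 'void f();', a call 'f();' is
  // emitted through the variadic-looking type 'void (...)'. If @f is actually
  // defined as the non-variadic 'void ()', the bitcast is stripped above so
  // the direct callee is visible again (and inlinable at -O0).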

  // 3. Perform the actual call.

  // Deactivate any cleanups that we're supposed to do immediately before
  // the call.
  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // Assert that the arguments we computed match up. The IR verifier
  // will catch this, but this is a common enough source of problems
  // during IRGen changes that it's way better for debugging to catch
  // it ourselves here.
#ifndef NDEBUG
  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // Inalloca argument can have different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }
#endif

  // Update the largest vector width if any arguments have vector types.
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinSize());
  }

  // Compute the calling convention and attributes.
  unsigned CallingConv;
  llvm::AttributeList Attrs;
  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
                             Callee.getAbstractInfo(), Attrs, CallingConv,
                             /*AttrOnCallSite=*/true);

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
    if (FD->usesFPIntrin())
      // All calls within a strictfp function are marked strictfp.
      Attrs = Attrs.addAttribute(getLLVMContext(),
                                 llvm::AttributeList::FunctionIndex,
                                 llvm::Attribute::StrictFP);

  // Apply some call-site-specific attributes.
  // TODO: work this into building the attribute set.

  // Apply always_inline to all calls within flatten functions.
  // FIXME: should this really take priority over __try, below?
  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
    Attrs = Attrs.addAttribute(getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::AlwaysInline);
  }

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope()) {
    Attrs = Attrs.addAttribute(getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoInline);
  }

  // Decide whether to use a call or an invoke.
  bool CannotThrow;
  if (currentFunctionUsesSEHTry()) {
    // SEH cares about asynchronous exceptions, so everything can "throw."
    CannotThrow = false;
  } else if (isCleanupPadScope() &&
             EHPersonality::get(*this).isMSVCXXPersonality()) {
    // The MSVC++ personality will implicitly terminate the program if an
    // exception is thrown during a cleanup outside of a try/catch.
    // We don't need to model anything in IR to get this behavior.
    CannotThrow = true;
  } else {
    // Otherwise, nounwind call sites will never throw.
    CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
                                     llvm::Attribute::NoUnwind);
  }

  // If we made a temporary, be sure to clean up after ourselves. Note that we
  // can't depend on being inside of an ExprWithCleanups, so we need to
  // manually pop this cleanup later on. Being eager about this is OK, since
  // this temporary is 'invisible' outside of the callee.
  if (UnusedReturnSizePtr)
    pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
                                         UnusedReturnSizePtr);

  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
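
  // Example: a nounwind callee (CannotThrow == true) is emitted as a plain
  // 'call' even when cleanups are active, while a potentially-throwing callee
  // in such a scope becomes an 'invoke' that unwinds to the destination
  // returned by getInvokeDest().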

  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(CalleePtr);

  AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
  Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
  Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  // Emit the actual call/invoke instruction.
  llvm::CallBase *CI;
  if (!InvokeDest) {
    CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CI;

  // If this is within a function that has the guard(nocf) attribute and is an
  // indirect call, add the "guard_nocf" attribute to this call to indicate
  // that Control Flow Guard checks should not be added, even if the call is
  // inlined.
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (const auto *A = FD->getAttr<CFGuardAttr>()) {
      if (A->getGuard() == CFGuardAttr::GuardArg::nocf &&
          !CI->getCalledFunction())
        Attrs = Attrs.addAttribute(
            getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
    }
  }

  // Apply the attributes and calling convention.
  CI->setAttributes(Attrs);
  CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // Apply various metadata.

  if (!CI->getType()->isVoidTy())
    CI->setName("call");

  // Update the largest vector width from the return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinSize());

  // Insert instrumentation or attach profile metadata at indirect call sites.
  // For more details, see the comment before the definition of
  // IPVK_IndirectCallTarget in InstrProfData.inc.
  if (!CI->getCalledFunction())
    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget, CI, CalleePtr);

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CI);

  // Suppress tail calls if requested.
  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
    if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
      Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
  }
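
  // Example: a callee declared with __attribute__((not_tail_called)) takes
  // the TCK_NoTail path above, which emits a 'notail call' and prevents
  // tail-call optimization of this particular call site.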

  // Add metadata for calls to MSAllocator functions.
  if (getDebugInfo() && TargetDecl && TargetDecl->hasAttr<MSAllocatorAttr>())
    getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy, Loc);

  // 4. Finish the call.

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRGen to discard
  // unreachable code.
  if (CI->doesNotReturn()) {
    if (UnusedReturnSizePtr)
      PopCleanupBlock();

    // Strip away the noreturn attribute to better diagnose unreachable UB.
    if (SanOpts.has(SanitizerKind::Unreachable)) {
      // Also remove it from the function, since CallBase::hasFnAttr
      // additionally checks attributes of the called function.
      if (auto *F = CI->getCalledFunction())
        F->removeFnAttr(llvm::Attribute::NoReturn);
      CI->removeAttribute(llvm::AttributeList::FunctionIndex,
                          llvm::Attribute::NoReturn);

      // Avoid incompatibility with ASan which relies on the `noreturn`
      // attribute to insert handler calls.
      if (SanOpts.hasOneOf(SanitizerKind::Address |
                           SanitizerKind::KernelAddress)) {
        SanitizerScope SanScope(this);
        llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
        Builder.SetInsertPoint(CI);
        auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
        llvm::FunctionCallee Fn =
            CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
        EmitNounwindRuntimeCall(Fn);
      }
    }

    EmitUnreachable(Loc);
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  // Perform the swifterror writeback.
  if (swiftErrorTemp.isValid()) {
    llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
    Builder.CreateStore(errorResult, swiftErrorArg);
  }
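
  // (The swifterror temporary was set up during argument emission above;
  // loading it here propagates the callee's final error value back into the
  // caller's original swifterror variable.)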

  // Emit any call-associated writebacks immediately. Arguably this
  // should happen after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  // Extract the return value.
  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = RetAI.getCoerceAndExpandType();

      Address addr = SRetPtr;
      addr = Builder.CreateElementBitCast(addr, coercionType);

      assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
      bool requiresExtract = isa<llvm::StructType>(CI->getType());

      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
          continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = CI;
        if (requiresExtract)
          elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
        else
          assert(unpaddedIndex == 0);
        Builder.CreateStore(elt, eltAddr);
      }
      LLVM_FALLTHROUGH;
    }

    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSizePtr)
        PopCleanupBlock();
      return ret;
    }

    case ABIArgInfo::Ignore:
      // If we are ignoring an argument that had a result, make sure to
      // construct the appropriate return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          Address DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr.isValid()) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the value doesn't match, perform a bitcast to coerce it. This
          // can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      Address DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr.isValid()) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  }();

  // Emit the assume_aligned check on the return value.
  if (Ret.isScalar() && TargetDecl) {
    AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
    AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
  }

  // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
  // we can't use the full cleanup mechanism.
  for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
    LifetimeEnd.Emit(*this, /*Flags=*/{});

  if (!ReturnValue.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
    pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
                RetTy);

  return Ret;
}

CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
  if (isVirtual()) {
    const CallExpr *CE = getVirtualCallExpr();
    return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
        CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
        CE ? CE->getBeginLoc() : SourceLocation());
  }

  return *this;
}

/* VarArg handling */

Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
  VAListAddr = VE->isMicrosoftABI() ? EmitMSVAListRef(VE->getSubExpr())
                                    : EmitVAListRef(VE->getSubExpr());
  QualType Ty = VE->getType();
  if (VE->isMicrosoftABI())
    return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}
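
// Example: a C expression such as 'va_arg(ap, int)' reaches EmitVAArg above;
// a Microsoft-ABI va_list is lowered through the target's EmitMSVAArg hook,
// and all others through its ABIInfo::EmitVAArg implementation.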