//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
         llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                        const FunctionProtoType *proto,
                                        unsigned prefixArgs,
                                        unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
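/// (For example, when this is reached from arrangeCXXMethodType below, the
/// implicit 'this' pointer has already been pushed into the prefix.)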
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, /*instanceMethod=*/true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  bool PassParams = true;

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));

    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgs AddedArgs =
      TheCXXABI.buildStructorSignature(MD, Type, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
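  // (The GlobalDecl carries the specific constructor/destructor variant, e.g.
  // complete vs. base, which getFromCtorType/getFromDtorType translate below.)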
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()),
      /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
      proto->getExtInfo(), paramInfos,
      RequiredArgs::forPrototypePlus(proto, 1, nullptr));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/ {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with fewer args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
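  // For example, with one ABI-specific prefix arg: args[0] is `this`, args[1]
  // is the prefix, and the prototype's own parameters start at args[2], so the
  // prefix count passed on below is 2.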
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                    ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if the record is a union,
    // only the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
}  // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
                              [&](Address EltAddr) {
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = RV.getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
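/// For example, with an 8-byte access: { { i64 } } is entered all the way down
/// to the inner i64, while { i32, i32 } is left alone because its first
/// element is too small to cover the access.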
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
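      // For example, narrowing the i64 0x1122334455667788 to i32 yields
      // 0x55667788 here, whereas the big-endian path above yields 0x11223344.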
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}



/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src,
                                    Ty->getPointerTo(Src.getAddressSpace()));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.AllocaInt8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
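  // For example, a { i32, i32 } value is emitted as two scalar i32 stores at
  // the elements' offsets rather than as a single aggregate store.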
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
        CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.AllocaInt8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                           CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of the LLVM IR function corresponding to a single Clang
  /// argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns the index of the first IR argument corresponding to ArgNo, and
  /// the number of such arguments.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
}  // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");
processed?"); 1529 1530 llvm::Type *resultType = nullptr; 1531 const ABIArgInfo &retAI = FI.getReturnInfo(); 1532 switch (retAI.getKind()) { 1533 case ABIArgInfo::Expand: 1534 llvm_unreachable("Invalid ABI kind for return argument"); 1535 1536 case ABIArgInfo::Extend: 1537 case ABIArgInfo::Direct: 1538 resultType = retAI.getCoerceToType(); 1539 break; 1540 1541 case ABIArgInfo::InAlloca: 1542 if (retAI.getInAllocaSRet()) { 1543 // sret things on win32 aren't void, they return the sret pointer. 1544 QualType ret = FI.getReturnType(); 1545 llvm::Type *ty = ConvertType(ret); 1546 unsigned addressSpace = Context.getTargetAddressSpace(ret); 1547 resultType = llvm::PointerType::get(ty, addressSpace); 1548 } else { 1549 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1550 } 1551 break; 1552 1553 case ABIArgInfo::Indirect: 1554 case ABIArgInfo::Ignore: 1555 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1556 break; 1557 1558 case ABIArgInfo::CoerceAndExpand: 1559 resultType = retAI.getUnpaddedCoerceAndExpandType(); 1560 break; 1561 } 1562 1563 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); 1564 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); 1565 1566 // Add type for sret argument. 1567 if (IRFunctionArgs.hasSRetArg()) { 1568 QualType Ret = FI.getReturnType(); 1569 llvm::Type *Ty = ConvertType(Ret); 1570 unsigned AddressSpace = Context.getTargetAddressSpace(Ret); 1571 ArgTypes[IRFunctionArgs.getSRetArgNo()] = 1572 llvm::PointerType::get(Ty, AddressSpace); 1573 } 1574 1575 // Add type for inalloca argument. 1576 if (IRFunctionArgs.hasInallocaArg()) { 1577 auto ArgStruct = FI.getArgStruct(); 1578 assert(ArgStruct); 1579 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); 1580 } 1581 1582 // Add in all of the required arguments. 1583 unsigned ArgNo = 0; 1584 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 1585 ie = it + FI.getNumRequiredArgs(); 1586 for (; it != ie; ++it, ++ArgNo) { 1587 const ABIArgInfo &ArgInfo = it->info; 1588 1589 // Insert a padding type to ensure proper alignment. 1590 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 1591 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 1592 ArgInfo.getPaddingType(); 1593 1594 unsigned FirstIRArg, NumIRArgs; 1595 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1596 1597 switch (ArgInfo.getKind()) { 1598 case ABIArgInfo::Ignore: 1599 case ABIArgInfo::InAlloca: 1600 assert(NumIRArgs == 0); 1601 break; 1602 1603 case ABIArgInfo::Indirect: { 1604 assert(NumIRArgs == 1); 1605 // indirect arguments are always on the stack, which is alloca addr space. 1606 llvm::Type *LTy = ConvertTypeForMem(it->type); 1607 ArgTypes[FirstIRArg] = LTy->getPointerTo( 1608 CGM.getDataLayout().getAllocaAddrSpace()); 1609 break; 1610 } 1611 1612 case ABIArgInfo::Extend: 1613 case ABIArgInfo::Direct: { 1614 // Fast-isel and the optimizer generally like scalar values better than 1615 // FCAs, so we flatten them if this is safe to do for this argument. 
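      //
      // As an illustration (not tied to any particular target ABI): if a
      // struct parameter was coerced to the LLVM type { i32, i32 } and may
      // be flattened, it contributes two scalar IR parameters, so
      //
      //   struct S { int a, b; };
      //   void f(struct S s);
      //
      // is emitted with roughly the signature `void @f(i32, i32)` instead of
      // taking a single first-class-aggregate argument.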
1616 llvm::Type *argType = ArgInfo.getCoerceToType(); 1617 llvm::StructType *st = dyn_cast<llvm::StructType>(argType); 1618 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 1619 assert(NumIRArgs == st->getNumElements()); 1620 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) 1621 ArgTypes[FirstIRArg + i] = st->getElementType(i); 1622 } else { 1623 assert(NumIRArgs == 1); 1624 ArgTypes[FirstIRArg] = argType; 1625 } 1626 break; 1627 } 1628 1629 case ABIArgInfo::CoerceAndExpand: { 1630 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1631 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { 1632 *ArgTypesIter++ = EltTy; 1633 } 1634 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1635 break; 1636 } 1637 1638 case ABIArgInfo::Expand: 1639 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1640 getExpandedTypes(it->type, ArgTypesIter); 1641 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1642 break; 1643 } 1644 } 1645 1646 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; 1647 assert(Erased && "Not in set?"); 1648 1649 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic()); 1650 } 1651 1652 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { 1653 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 1654 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 1655 1656 if (!isFuncTypeConvertible(FPT)) 1657 return llvm::StructType::get(getLLVMContext()); 1658 1659 const CGFunctionInfo *Info; 1660 if (isa<CXXDestructorDecl>(MD)) 1661 Info = 1662 &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType())); 1663 else 1664 Info = &arrangeCXXMethodDeclaration(MD); 1665 return GetFunctionType(*Info); 1666 } 1667 1668 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, 1669 llvm::AttrBuilder &FuncAttrs, 1670 const FunctionProtoType *FPT) { 1671 if (!FPT) 1672 return; 1673 1674 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && 1675 FPT->isNothrow(Ctx)) 1676 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1677 } 1678 1679 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone, 1680 bool AttrOnCallSite, 1681 llvm::AttrBuilder &FuncAttrs) { 1682 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. 1683 if (!HasOptnone) { 1684 if (CodeGenOpts.OptimizeSize) 1685 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); 1686 if (CodeGenOpts.OptimizeSize == 2) 1687 FuncAttrs.addAttribute(llvm::Attribute::MinSize); 1688 } 1689 1690 if (CodeGenOpts.DisableRedZone) 1691 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); 1692 if (CodeGenOpts.NoImplicitFloat) 1693 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); 1694 1695 if (AttrOnCallSite) { 1696 // Attributes that should go on the call site only. 1697 if (!CodeGenOpts.SimplifyLibCalls || 1698 CodeGenOpts.isNoBuiltinFunc(Name.data())) 1699 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); 1700 if (!CodeGenOpts.TrapFuncName.empty()) 1701 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName); 1702 } else { 1703 // Attributes that should go on the function, but not the call site. 
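    // Most of these are string attributes (for example "no-frame-pointer-elim"
    // or "less-precise-fpmad") that record per-function codegen options for
    // the backend rather than IR-level semantics, which is why they belong on
    // the definition rather than on each call site.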
1704 if (!CodeGenOpts.DisableFPElim) { 1705 FuncAttrs.addAttribute("no-frame-pointer-elim", "false"); 1706 } else if (CodeGenOpts.OmitLeafFramePointer) { 1707 FuncAttrs.addAttribute("no-frame-pointer-elim", "false"); 1708 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf"); 1709 } else { 1710 FuncAttrs.addAttribute("no-frame-pointer-elim", "true"); 1711 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf"); 1712 } 1713 1714 FuncAttrs.addAttribute("less-precise-fpmad", 1715 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD)); 1716 1717 if (!CodeGenOpts.FPDenormalMode.empty()) 1718 FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode); 1719 1720 FuncAttrs.addAttribute("no-trapping-math", 1721 llvm::toStringRef(CodeGenOpts.NoTrappingMath)); 1722 1723 // TODO: Are these all needed? 1724 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags. 1725 FuncAttrs.addAttribute("no-infs-fp-math", 1726 llvm::toStringRef(CodeGenOpts.NoInfsFPMath)); 1727 FuncAttrs.addAttribute("no-nans-fp-math", 1728 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath)); 1729 FuncAttrs.addAttribute("unsafe-fp-math", 1730 llvm::toStringRef(CodeGenOpts.UnsafeFPMath)); 1731 FuncAttrs.addAttribute("use-soft-float", 1732 llvm::toStringRef(CodeGenOpts.SoftFloat)); 1733 FuncAttrs.addAttribute("stack-protector-buffer-size", 1734 llvm::utostr(CodeGenOpts.SSPBufferSize)); 1735 FuncAttrs.addAttribute("no-signed-zeros-fp-math", 1736 llvm::toStringRef(CodeGenOpts.NoSignedZeros)); 1737 FuncAttrs.addAttribute( 1738 "correctly-rounded-divide-sqrt-fp-math", 1739 llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt)); 1740 1741 // TODO: Reciprocal estimate codegen options should apply to instructions? 1742 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals; 1743 if (!Recips.empty()) 1744 FuncAttrs.addAttribute("reciprocal-estimates", 1745 llvm::join(Recips, ",")); 1746 1747 if (!CodeGenOpts.PreferVectorWidth.empty() && 1748 CodeGenOpts.PreferVectorWidth != "none") 1749 FuncAttrs.addAttribute("prefer-vector-width", 1750 CodeGenOpts.PreferVectorWidth); 1751 1752 if (CodeGenOpts.StackRealignment) 1753 FuncAttrs.addAttribute("stackrealign"); 1754 if (CodeGenOpts.Backchain) 1755 FuncAttrs.addAttribute("backchain"); 1756 } 1757 1758 if (getLangOpts().assumeFunctionsAreConvergent()) { 1759 // Conservatively, mark all functions and calls in CUDA and OpenCL as 1760 // convergent (meaning, they may call an intrinsically convergent op, such 1761 // as __syncthreads() / barrier(), and so can't have certain optimizations 1762 // applied around them). LLVM will remove this attribute where it safely 1763 // can. 1764 FuncAttrs.addAttribute(llvm::Attribute::Convergent); 1765 } 1766 1767 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { 1768 // Exceptions aren't supported in CUDA device code. 1769 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1770 1771 // Respect -fcuda-flush-denormals-to-zero. 
1772 if (getLangOpts().CUDADeviceFlushDenormalsToZero) 1773 FuncAttrs.addAttribute("nvptx-f32ftz", "true"); 1774 } 1775 } 1776 1777 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) { 1778 llvm::AttrBuilder FuncAttrs; 1779 ConstructDefaultFnAttrList(F.getName(), 1780 F.hasFnAttribute(llvm::Attribute::OptimizeNone), 1781 /* AttrOnCallsite = */ false, FuncAttrs); 1782 F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs); 1783 } 1784 1785 void CodeGenModule::ConstructAttributeList( 1786 StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo, 1787 llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) { 1788 llvm::AttrBuilder FuncAttrs; 1789 llvm::AttrBuilder RetAttrs; 1790 1791 CallingConv = FI.getEffectiveCallingConvention(); 1792 if (FI.isNoReturn()) 1793 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1794 1795 // If we have information about the function prototype, we can learn 1796 // attributes form there. 1797 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs, 1798 CalleeInfo.getCalleeFunctionProtoType()); 1799 1800 const Decl *TargetDecl = CalleeInfo.getCalleeDecl(); 1801 1802 bool HasOptnone = false; 1803 // FIXME: handle sseregparm someday... 1804 if (TargetDecl) { 1805 if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) 1806 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice); 1807 if (TargetDecl->hasAttr<NoThrowAttr>()) 1808 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1809 if (TargetDecl->hasAttr<NoReturnAttr>()) 1810 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1811 if (TargetDecl->hasAttr<ColdAttr>()) 1812 FuncAttrs.addAttribute(llvm::Attribute::Cold); 1813 if (TargetDecl->hasAttr<NoDuplicateAttr>()) 1814 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate); 1815 if (TargetDecl->hasAttr<ConvergentAttr>()) 1816 FuncAttrs.addAttribute(llvm::Attribute::Convergent); 1817 1818 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { 1819 AddAttributesFromFunctionProtoType( 1820 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>()); 1821 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function. 1822 // These attributes are not inherited by overloads. 1823 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn); 1824 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual())) 1825 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1826 } 1827 1828 // 'const', 'pure' and 'noalias' attributed functions are also nounwind. 1829 if (TargetDecl->hasAttr<ConstAttr>()) { 1830 FuncAttrs.addAttribute(llvm::Attribute::ReadNone); 1831 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1832 } else if (TargetDecl->hasAttr<PureAttr>()) { 1833 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly); 1834 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1835 } else if (TargetDecl->hasAttr<NoAliasAttr>()) { 1836 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly); 1837 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1838 } 1839 if (TargetDecl->hasAttr<RestrictAttr>()) 1840 RetAttrs.addAttribute(llvm::Attribute::NoAlias); 1841 if (TargetDecl->hasAttr<ReturnsNonNullAttr>()) 1842 RetAttrs.addAttribute(llvm::Attribute::NonNull); 1843 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>()) 1844 FuncAttrs.addAttribute("no_caller_saved_registers"); 1845 1846 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>(); 1847 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) { 1848 Optional<unsigned> NumElemsParam; 1849 // alloc_size args are base-1, 0 means not present. 
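      // For example, a hypothetical declaration such as
      //
      //   void *my_malloc(int n) __attribute__((alloc_size(1)));
      //
      // reaches this point with the 1-based index 1 and is lowered to the
      // 0-based LLVM attribute allocsize(0).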
1850 if (unsigned N = AllocSize->getNumElemsParam()) 1851 NumElemsParam = N - 1; 1852 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam() - 1, 1853 NumElemsParam); 1854 } 1855 } 1856 1857 ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs); 1858 1859 if (CodeGenOpts.EnableSegmentedStacks && 1860 !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>())) 1861 FuncAttrs.addAttribute("split-stack"); 1862 1863 // Add NonLazyBind attribute to function declarations when -fno-plt 1864 // is used. 1865 if (TargetDecl && CodeGenOpts.NoPLT) { 1866 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { 1867 if (!Fn->isDefined() && !AttrOnCallSite) { 1868 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind); 1869 } 1870 } 1871 } 1872 1873 if (!AttrOnCallSite) { 1874 bool DisableTailCalls = 1875 CodeGenOpts.DisableTailCalls || 1876 (TargetDecl && (TargetDecl->hasAttr<DisableTailCallsAttr>() || 1877 TargetDecl->hasAttr<AnyX86InterruptAttr>())); 1878 FuncAttrs.addAttribute("disable-tail-calls", 1879 llvm::toStringRef(DisableTailCalls)); 1880 GetCPUAndFeaturesAttributes(TargetDecl, FuncAttrs); 1881 } 1882 1883 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); 1884 1885 QualType RetTy = FI.getReturnType(); 1886 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1887 switch (RetAI.getKind()) { 1888 case ABIArgInfo::Extend: 1889 if (RetAI.isSignExt()) 1890 RetAttrs.addAttribute(llvm::Attribute::SExt); 1891 else 1892 RetAttrs.addAttribute(llvm::Attribute::ZExt); 1893 LLVM_FALLTHROUGH; 1894 case ABIArgInfo::Direct: 1895 if (RetAI.getInReg()) 1896 RetAttrs.addAttribute(llvm::Attribute::InReg); 1897 break; 1898 case ABIArgInfo::Ignore: 1899 break; 1900 1901 case ABIArgInfo::InAlloca: 1902 case ABIArgInfo::Indirect: { 1903 // inalloca and sret disable readnone and readonly 1904 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1905 .removeAttribute(llvm::Attribute::ReadNone); 1906 break; 1907 } 1908 1909 case ABIArgInfo::CoerceAndExpand: 1910 break; 1911 1912 case ABIArgInfo::Expand: 1913 llvm_unreachable("Invalid ABI kind for return argument"); 1914 } 1915 1916 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { 1917 QualType PTy = RefTy->getPointeeType(); 1918 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 1919 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 1920 .getQuantity()); 1921 else if (getContext().getTargetAddressSpace(PTy) == 0) 1922 RetAttrs.addAttribute(llvm::Attribute::NonNull); 1923 } 1924 1925 bool hasUsedSRet = false; 1926 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs()); 1927 1928 // Attach attributes to sret. 1929 if (IRFunctionArgs.hasSRetArg()) { 1930 llvm::AttrBuilder SRETAttrs; 1931 SRETAttrs.addAttribute(llvm::Attribute::StructRet); 1932 hasUsedSRet = true; 1933 if (RetAI.getInReg()) 1934 SRETAttrs.addAttribute(llvm::Attribute::InReg); 1935 ArgAttrs[IRFunctionArgs.getSRetArgNo()] = 1936 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs); 1937 } 1938 1939 // Attach attributes to inalloca argument. 
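  // (inalloca is currently used for the 32-bit x86 Windows C++ ABI: arguments
  // that must be constructed in place, such as objects with non-trivial copy
  // constructors, are packed into one argument struct that the caller
  // allocates on the stack and passes as a single trailing pointer.)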
1940 if (IRFunctionArgs.hasInallocaArg()) { 1941 llvm::AttrBuilder Attrs; 1942 Attrs.addAttribute(llvm::Attribute::InAlloca); 1943 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] = 1944 llvm::AttributeSet::get(getLLVMContext(), Attrs); 1945 } 1946 1947 unsigned ArgNo = 0; 1948 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 1949 E = FI.arg_end(); 1950 I != E; ++I, ++ArgNo) { 1951 QualType ParamType = I->type; 1952 const ABIArgInfo &AI = I->info; 1953 llvm::AttrBuilder Attrs; 1954 1955 // Add attribute for padding argument, if necessary. 1956 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 1957 if (AI.getPaddingInReg()) { 1958 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 1959 llvm::AttributeSet::get( 1960 getLLVMContext(), 1961 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg)); 1962 } 1963 } 1964 1965 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 1966 // have the corresponding parameter variable. It doesn't make 1967 // sense to do it here because parameters are so messed up. 1968 switch (AI.getKind()) { 1969 case ABIArgInfo::Extend: 1970 if (AI.isSignExt()) 1971 Attrs.addAttribute(llvm::Attribute::SExt); 1972 else 1973 Attrs.addAttribute(llvm::Attribute::ZExt); 1974 LLVM_FALLTHROUGH; 1975 case ABIArgInfo::Direct: 1976 if (ArgNo == 0 && FI.isChainCall()) 1977 Attrs.addAttribute(llvm::Attribute::Nest); 1978 else if (AI.getInReg()) 1979 Attrs.addAttribute(llvm::Attribute::InReg); 1980 break; 1981 1982 case ABIArgInfo::Indirect: { 1983 if (AI.getInReg()) 1984 Attrs.addAttribute(llvm::Attribute::InReg); 1985 1986 if (AI.getIndirectByVal()) 1987 Attrs.addAttribute(llvm::Attribute::ByVal); 1988 1989 CharUnits Align = AI.getIndirectAlign(); 1990 1991 // In a byval argument, it is important that the required 1992 // alignment of the type is honored, as LLVM might be creating a 1993 // *new* stack object, and needs to know what alignment to give 1994 // it. (Sometimes it can deduce a sensible alignment on its own, 1995 // but not if clang decides it must emit a packed struct, or the 1996 // user specifies increased alignment requirements.) 1997 // 1998 // This is different from indirect *not* byval, where the object 1999 // exists already, and the align attribute is purely 2000 // informative. 2001 assert(!Align.isZero()); 2002 2003 // For now, only add this when we have a byval argument. 2004 // TODO: be less lazy about updating test cases. 2005 if (AI.getIndirectByVal()) 2006 Attrs.addAlignmentAttr(Align.getQuantity()); 2007 2008 // byval disables readnone and readonly. 2009 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2010 .removeAttribute(llvm::Attribute::ReadNone); 2011 break; 2012 } 2013 case ABIArgInfo::Ignore: 2014 case ABIArgInfo::Expand: 2015 case ABIArgInfo::CoerceAndExpand: 2016 break; 2017 2018 case ABIArgInfo::InAlloca: 2019 // inalloca disables readnone and readonly. 
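      // (The callee reads and writes the caller-allocated argument memory, so
      // the function as a whole can no longer be treated as not touching
      // memory.)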
2020 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2021 .removeAttribute(llvm::Attribute::ReadNone); 2022 continue; 2023 } 2024 2025 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { 2026 QualType PTy = RefTy->getPointeeType(); 2027 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 2028 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 2029 .getQuantity()); 2030 else if (getContext().getTargetAddressSpace(PTy) == 0) 2031 Attrs.addAttribute(llvm::Attribute::NonNull); 2032 } 2033 2034 switch (FI.getExtParameterInfo(ArgNo).getABI()) { 2035 case ParameterABI::Ordinary: 2036 break; 2037 2038 case ParameterABI::SwiftIndirectResult: { 2039 // Add 'sret' if we haven't already used it for something, but 2040 // only if the result is void. 2041 if (!hasUsedSRet && RetTy->isVoidType()) { 2042 Attrs.addAttribute(llvm::Attribute::StructRet); 2043 hasUsedSRet = true; 2044 } 2045 2046 // Add 'noalias' in either case. 2047 Attrs.addAttribute(llvm::Attribute::NoAlias); 2048 2049 // Add 'dereferenceable' and 'alignment'. 2050 auto PTy = ParamType->getPointeeType(); 2051 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { 2052 auto info = getContext().getTypeInfoInChars(PTy); 2053 Attrs.addDereferenceableAttr(info.first.getQuantity()); 2054 Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(), 2055 info.second.getQuantity())); 2056 } 2057 break; 2058 } 2059 2060 case ParameterABI::SwiftErrorResult: 2061 Attrs.addAttribute(llvm::Attribute::SwiftError); 2062 break; 2063 2064 case ParameterABI::SwiftContext: 2065 Attrs.addAttribute(llvm::Attribute::SwiftSelf); 2066 break; 2067 } 2068 2069 if (FI.getExtParameterInfo(ArgNo).isNoEscape()) 2070 Attrs.addAttribute(llvm::Attribute::NoCapture); 2071 2072 if (Attrs.hasAttributes()) { 2073 unsigned FirstIRArg, NumIRArgs; 2074 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2075 for (unsigned i = 0; i < NumIRArgs; i++) 2076 ArgAttrs[FirstIRArg + i] = 2077 llvm::AttributeSet::get(getLLVMContext(), Attrs); 2078 } 2079 } 2080 assert(ArgNo == FI.arg_size()); 2081 2082 AttrList = llvm::AttributeList::get( 2083 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs), 2084 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs); 2085 } 2086 2087 /// An argument came in as a promoted argument; demote it back to its 2088 /// declared type. 2089 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 2090 const VarDecl *var, 2091 llvm::Value *value) { 2092 llvm::Type *varType = CGF.ConvertType(var->getType()); 2093 2094 // This can happen with promotions that actually don't change the 2095 // underlying type, like the enum promotions. 2096 if (value->getType() == varType) return value; 2097 2098 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 2099 && "unexpected promotion type"); 2100 2101 if (isa<llvm::IntegerType>(varType)) 2102 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 2103 2104 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 2105 } 2106 2107 /// Returns the attribute (either parameter attribute, or function 2108 /// attribute), which declares argument ArgNo to be non-null. 
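/// For instance, either of these (hypothetical) declarations marks the first
/// argument of `use` as non-null:
///
///   void use(void *p) __attribute__((nonnull(1)));   // function attribute
///   void use(void *p __attribute__((nonnull)));      // parameter attribute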
2109 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, 2110 QualType ArgType, unsigned ArgNo) { 2111 // FIXME: __attribute__((nonnull)) can also be applied to: 2112 // - references to pointers, where the pointee is known to be 2113 // nonnull (apparently a Clang extension) 2114 // - transparent unions containing pointers 2115 // In the former case, LLVM IR cannot represent the constraint. In 2116 // the latter case, we have no guarantee that the transparent union 2117 // is in fact passed as a pointer. 2118 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) 2119 return nullptr; 2120 // First, check attribute on parameter itself. 2121 if (PVD) { 2122 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) 2123 return ParmNNAttr; 2124 } 2125 // Check function attributes. 2126 if (!FD) 2127 return nullptr; 2128 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) { 2129 if (NNAttr->isNonNull(ArgNo)) 2130 return NNAttr; 2131 } 2132 return nullptr; 2133 } 2134 2135 namespace { 2136 struct CopyBackSwiftError final : EHScopeStack::Cleanup { 2137 Address Temp; 2138 Address Arg; 2139 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {} 2140 void Emit(CodeGenFunction &CGF, Flags flags) override { 2141 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp); 2142 CGF.Builder.CreateStore(errorValue, Arg); 2143 } 2144 }; 2145 } 2146 2147 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, 2148 llvm::Function *Fn, 2149 const FunctionArgList &Args) { 2150 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) 2151 // Naked functions don't have prologues. 2152 return; 2153 2154 // If this is an implicit-return-zero function, go ahead and 2155 // initialize the return value. TODO: it might be nice to have 2156 // a more general mechanism for this that didn't require synthesized 2157 // return statements. 2158 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) { 2159 if (FD->hasImplicitReturnZero()) { 2160 QualType RetTy = FD->getReturnType().getUnqualifiedType(); 2161 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy); 2162 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy); 2163 Builder.CreateStore(Zero, ReturnValue); 2164 } 2165 } 2166 2167 // FIXME: We no longer need the types from FunctionArgList; lift up and 2168 // simplify. 2169 2170 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI); 2171 // Flattened function arguments. 2172 SmallVector<llvm::Value *, 16> FnArgs; 2173 FnArgs.reserve(IRFunctionArgs.totalIRArgs()); 2174 for (auto &Arg : Fn->args()) { 2175 FnArgs.push_back(&Arg); 2176 } 2177 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs()); 2178 2179 // If we're using inalloca, all the memory arguments are GEPs off of the last 2180 // parameter, which is a pointer to the complete memory area. 2181 Address ArgStruct = Address::invalid(); 2182 const llvm::StructLayout *ArgStructLayout = nullptr; 2183 if (IRFunctionArgs.hasInallocaArg()) { 2184 ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct()); 2185 ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()], 2186 FI.getArgStructAlignment()); 2187 2188 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo()); 2189 } 2190 2191 // Name the struct return parameter. 
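  // For a function whose result is returned indirectly, for instance
  //
  //   struct Big f(void);   // assuming `Big` is returned in memory here
  //
  // the IR prototype is roughly `void @f(%struct.Big* noalias %agg.result)`
  // (the sret attribute itself is attached in ConstructAttributeList); the
  // code below names that hidden parameter and marks it noalias.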
2192 if (IRFunctionArgs.hasSRetArg()) { 2193 auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]); 2194 AI->setName("agg.result"); 2195 AI->addAttr(llvm::Attribute::NoAlias); 2196 } 2197 2198 // Track if we received the parameter as a pointer (indirect, byval, or 2199 // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it 2200 // into a local alloca for us. 2201 SmallVector<ParamValue, 16> ArgVals; 2202 ArgVals.reserve(Args.size()); 2203 2204 // Create a pointer value for every parameter declaration. This usually 2205 // entails copying one or more LLVM IR arguments into an alloca. Don't push 2206 // any cleanups or do anything that might unwind. We do that separately, so 2207 // we can push the cleanups in the correct order for the ABI. 2208 assert(FI.arg_size() == Args.size() && 2209 "Mismatch between function signature & arguments."); 2210 unsigned ArgNo = 0; 2211 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); 2212 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 2213 i != e; ++i, ++info_it, ++ArgNo) { 2214 const VarDecl *Arg = *i; 2215 const ABIArgInfo &ArgI = info_it->info; 2216 2217 bool isPromoted = 2218 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted(); 2219 // We are converting from ABIArgInfo type to VarDecl type directly, unless 2220 // the parameter is promoted. In this case we convert to 2221 // CGFunctionInfo::ArgInfo type with subsequent argument demotion. 2222 QualType Ty = isPromoted ? info_it->type : Arg->getType(); 2223 assert(hasScalarEvaluationKind(Ty) == 2224 hasScalarEvaluationKind(Arg->getType())); 2225 2226 unsigned FirstIRArg, NumIRArgs; 2227 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2228 2229 switch (ArgI.getKind()) { 2230 case ABIArgInfo::InAlloca: { 2231 assert(NumIRArgs == 0); 2232 auto FieldIndex = ArgI.getInAllocaFieldIndex(); 2233 CharUnits FieldOffset = 2234 CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex)); 2235 Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset, 2236 Arg->getName()); 2237 ArgVals.push_back(ParamValue::forIndirect(V)); 2238 break; 2239 } 2240 2241 case ABIArgInfo::Indirect: { 2242 assert(NumIRArgs == 1); 2243 Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign()); 2244 2245 if (!hasScalarEvaluationKind(Ty)) { 2246 // Aggregates and complex variables are accessed by reference. All we 2247 // need to do is realign the value, if requested. 2248 Address V = ParamAddr; 2249 if (ArgI.getIndirectRealign()) { 2250 Address AlignedTemp = CreateMemTemp(Ty, "coerce"); 2251 2252 // Copy from the incoming argument pointer to the temporary with the 2253 // appropriate alignment. 2254 // 2255 // FIXME: We should have a common utility for generating an aggregate 2256 // copy. 2257 CharUnits Size = getContext().getTypeSizeInChars(Ty); 2258 auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()); 2259 Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy); 2260 Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy); 2261 Builder.CreateMemCpy(Dst, Src, SizeVal, false); 2262 V = AlignedTemp; 2263 } 2264 ArgVals.push_back(ParamValue::forIndirect(V)); 2265 } else { 2266 // Load scalar value from indirect argument. 
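        // (The ABI passed this scalar by hidden reference; loading it here
        // turns it back into an ordinary SSA value for the function body.)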
2267 llvm::Value *V = 2268 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart()); 2269 2270 if (isPromoted) 2271 V = emitArgumentDemotion(*this, Arg, V); 2272 ArgVals.push_back(ParamValue::forDirect(V)); 2273 } 2274 break; 2275 } 2276 2277 case ABIArgInfo::Extend: 2278 case ABIArgInfo::Direct: { 2279 2280 // If we have the trivial case, handle it with no muss and fuss. 2281 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 2282 ArgI.getCoerceToType() == ConvertType(Ty) && 2283 ArgI.getDirectOffset() == 0) { 2284 assert(NumIRArgs == 1); 2285 llvm::Value *V = FnArgs[FirstIRArg]; 2286 auto AI = cast<llvm::Argument>(V); 2287 2288 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) { 2289 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), 2290 PVD->getFunctionScopeIndex())) 2291 AI->addAttr(llvm::Attribute::NonNull); 2292 2293 QualType OTy = PVD->getOriginalType(); 2294 if (const auto *ArrTy = 2295 getContext().getAsConstantArrayType(OTy)) { 2296 // A C99 array parameter declaration with the static keyword also 2297 // indicates dereferenceability, and if the size is constant we can 2298 // use the dereferenceable attribute (which requires the size in 2299 // bytes). 2300 if (ArrTy->getSizeModifier() == ArrayType::Static) { 2301 QualType ETy = ArrTy->getElementType(); 2302 uint64_t ArrSize = ArrTy->getSize().getZExtValue(); 2303 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && 2304 ArrSize) { 2305 llvm::AttrBuilder Attrs; 2306 Attrs.addDereferenceableAttr( 2307 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize); 2308 AI->addAttrs(Attrs); 2309 } else if (getContext().getTargetAddressSpace(ETy) == 0) { 2310 AI->addAttr(llvm::Attribute::NonNull); 2311 } 2312 } 2313 } else if (const auto *ArrTy = 2314 getContext().getAsVariableArrayType(OTy)) { 2315 // For C99 VLAs with the static keyword, we don't know the size so 2316 // we can't use the dereferenceable attribute, but in addrspace(0) 2317 // we know that it must be nonnull. 2318 if (ArrTy->getSizeModifier() == VariableArrayType::Static && 2319 !getContext().getTargetAddressSpace(ArrTy->getElementType())) 2320 AI->addAttr(llvm::Attribute::NonNull); 2321 } 2322 2323 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); 2324 if (!AVAttr) 2325 if (const auto *TOTy = dyn_cast<TypedefType>(OTy)) 2326 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); 2327 if (AVAttr) { 2328 llvm::Value *AlignmentValue = 2329 EmitScalarExpr(AVAttr->getAlignment()); 2330 llvm::ConstantInt *AlignmentCI = 2331 cast<llvm::ConstantInt>(AlignmentValue); 2332 unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(), 2333 +llvm::Value::MaximumAlignment); 2334 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment)); 2335 } 2336 } 2337 2338 if (Arg->getType().isRestrictQualified()) 2339 AI->addAttr(llvm::Attribute::NoAlias); 2340 2341 // LLVM expects swifterror parameters to be used in very restricted 2342 // ways. Copy the value into a less-restricted temporary. 2343 if (FI.getExtParameterInfo(ArgNo).getABI() 2344 == ParameterABI::SwiftErrorResult) { 2345 QualType pointeeTy = Ty->getPointeeType(); 2346 assert(pointeeTy->isPointerType()); 2347 Address temp = 2348 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 2349 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy)); 2350 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); 2351 Builder.CreateStore(incomingErrorValue, temp); 2352 V = temp.getPointer(); 2353 2354 // Push a cleanup to copy the value back at the end of the function. 
2355 // The convention does not guarantee that the value will be written 2356 // back if the function exits with an unwind exception. 2357 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg); 2358 } 2359 2360 // Ensure the argument is the correct type. 2361 if (V->getType() != ArgI.getCoerceToType()) 2362 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 2363 2364 if (isPromoted) 2365 V = emitArgumentDemotion(*this, Arg, V); 2366 2367 // Because of merging of function types from multiple decls it is 2368 // possible for the type of an argument to not match the corresponding 2369 // type in the function type. Since we are codegening the callee 2370 // in here, add a cast to the argument type. 2371 llvm::Type *LTy = ConvertType(Arg->getType()); 2372 if (V->getType() != LTy) 2373 V = Builder.CreateBitCast(V, LTy); 2374 2375 ArgVals.push_back(ParamValue::forDirect(V)); 2376 break; 2377 } 2378 2379 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), 2380 Arg->getName()); 2381 2382 // Pointer to store into. 2383 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI); 2384 2385 // Fast-isel and the optimizer generally like scalar values better than 2386 // FCAs, so we flatten them if this is safe to do for this argument. 2387 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 2388 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && 2389 STy->getNumElements() > 1) { 2390 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy); 2391 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 2392 llvm::Type *DstTy = Ptr.getElementType(); 2393 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 2394 2395 Address AddrToStoreInto = Address::invalid(); 2396 if (SrcSize <= DstSize) { 2397 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy); 2398 } else { 2399 AddrToStoreInto = 2400 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce"); 2401 } 2402 2403 assert(STy->getNumElements() == NumIRArgs); 2404 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2405 auto AI = FnArgs[FirstIRArg + i]; 2406 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 2407 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i)); 2408 Address EltPtr = 2409 Builder.CreateStructGEP(AddrToStoreInto, i, Offset); 2410 Builder.CreateStore(AI, EltPtr); 2411 } 2412 2413 if (SrcSize > DstSize) { 2414 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize); 2415 } 2416 2417 } else { 2418 // Simple case, just do a coerced store of the argument into the alloca. 2419 assert(NumIRArgs == 1); 2420 auto AI = FnArgs[FirstIRArg]; 2421 AI->setName(Arg->getName() + ".coerce"); 2422 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this); 2423 } 2424 2425 // Match to what EmitParmDecl is expecting for this type. 2426 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 2427 llvm::Value *V = 2428 EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart()); 2429 if (isPromoted) 2430 V = emitArgumentDemotion(*this, Arg, V); 2431 ArgVals.push_back(ParamValue::forDirect(V)); 2432 } else { 2433 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2434 } 2435 break; 2436 } 2437 2438 case ABIArgInfo::CoerceAndExpand: { 2439 // Reconstruct into a temporary. 
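      // (CoerceAndExpand is used, for instance, by the Swift calling
      // convention lowering: the aggregate is split into a sequence of legal
      // scalar types, each non-padding element arrives as its own IR
      // argument, and the pieces are stored back into a temporary shaped like
      // the coercion struct so the rest of IRGen can treat the parameter as
      // an ordinary indirect value.)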
2440 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 2441 ArgVals.push_back(ParamValue::forIndirect(alloca)); 2442 2443 auto coercionType = ArgI.getCoerceAndExpandType(); 2444 alloca = Builder.CreateElementBitCast(alloca, coercionType); 2445 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 2446 2447 unsigned argIndex = FirstIRArg; 2448 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 2449 llvm::Type *eltType = coercionType->getElementType(i); 2450 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) 2451 continue; 2452 2453 auto eltAddr = Builder.CreateStructGEP(alloca, i, layout); 2454 auto elt = FnArgs[argIndex++]; 2455 Builder.CreateStore(elt, eltAddr); 2456 } 2457 assert(argIndex == FirstIRArg + NumIRArgs); 2458 break; 2459 } 2460 2461 case ABIArgInfo::Expand: { 2462 // If this structure was expanded into multiple arguments then 2463 // we need to create a temporary and reconstruct it from the 2464 // arguments. 2465 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 2466 LValue LV = MakeAddrLValue(Alloca, Ty); 2467 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2468 2469 auto FnArgIter = FnArgs.begin() + FirstIRArg; 2470 ExpandTypeFromArgs(Ty, LV, FnArgIter); 2471 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs); 2472 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { 2473 auto AI = FnArgs[FirstIRArg + i]; 2474 AI->setName(Arg->getName() + "." + Twine(i)); 2475 } 2476 break; 2477 } 2478 2479 case ABIArgInfo::Ignore: 2480 assert(NumIRArgs == 0); 2481 // Initialize the local variable appropriately. 2482 if (!hasScalarEvaluationKind(Ty)) { 2483 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty))); 2484 } else { 2485 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType())); 2486 ArgVals.push_back(ParamValue::forDirect(U)); 2487 } 2488 break; 2489 } 2490 } 2491 2492 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 2493 for (int I = Args.size() - 1; I >= 0; --I) 2494 EmitParmDecl(*Args[I], ArgVals[I], I + 1); 2495 } else { 2496 for (unsigned I = 0, E = Args.size(); I != E; ++I) 2497 EmitParmDecl(*Args[I], ArgVals[I], I + 1); 2498 } 2499 } 2500 2501 static void eraseUnusedBitCasts(llvm::Instruction *insn) { 2502 while (insn->use_empty()) { 2503 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn); 2504 if (!bitcast) return; 2505 2506 // This is "safe" because we would have used a ConstantExpr otherwise. 2507 insn = cast<llvm::Instruction>(bitcast->getOperand(0)); 2508 bitcast->eraseFromParent(); 2509 } 2510 } 2511 2512 /// Try to emit a fused autorelease of a return result. 2513 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, 2514 llvm::Value *result) { 2515 // We must be immediately followed the cast. 2516 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock(); 2517 if (BB->empty()) return nullptr; 2518 if (&BB->back() != result) return nullptr; 2519 2520 llvm::Type *resultType = result->getType(); 2521 2522 // result is in a BasicBlock and is therefore an Instruction. 2523 llvm::Instruction *generator = cast<llvm::Instruction>(result); 2524 2525 SmallVector<llvm::Instruction *, 4> InstsToKill; 2526 2527 // Look for: 2528 // %generator = bitcast %type1* %generator2 to %type2* 2529 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) { 2530 // We would have emitted this as a constant if the operand weren't 2531 // an Instruction. 
2532 generator = cast<llvm::Instruction>(bitcast->getOperand(0)); 2533 2534 // Require the generator to be immediately followed by the cast. 2535 if (generator->getNextNode() != bitcast) 2536 return nullptr; 2537 2538 InstsToKill.push_back(bitcast); 2539 } 2540 2541 // Look for: 2542 // %generator = call i8* @objc_retain(i8* %originalResult) 2543 // or 2544 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult) 2545 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator); 2546 if (!call) return nullptr; 2547 2548 bool doRetainAutorelease; 2549 2550 if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) { 2551 doRetainAutorelease = true; 2552 } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints() 2553 .objc_retainAutoreleasedReturnValue) { 2554 doRetainAutorelease = false; 2555 2556 // If we emitted an assembly marker for this call (and the 2557 // ARCEntrypoints field should have been set if so), go looking 2558 // for that call. If we can't find it, we can't do this 2559 // optimization. But it should always be the immediately previous 2560 // instruction, unless we needed bitcasts around the call. 2561 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { 2562 llvm::Instruction *prev = call->getPrevNode(); 2563 assert(prev); 2564 if (isa<llvm::BitCastInst>(prev)) { 2565 prev = prev->getPrevNode(); 2566 assert(prev); 2567 } 2568 assert(isa<llvm::CallInst>(prev)); 2569 assert(cast<llvm::CallInst>(prev)->getCalledValue() == 2570 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); 2571 InstsToKill.push_back(prev); 2572 } 2573 } else { 2574 return nullptr; 2575 } 2576 2577 result = call->getArgOperand(0); 2578 InstsToKill.push_back(call); 2579 2580 // Keep killing bitcasts, for sanity. Note that we no longer care 2581 // about precise ordering as long as there's exactly one use. 2582 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 2583 if (!bitcast->hasOneUse()) break; 2584 InstsToKill.push_back(bitcast); 2585 result = bitcast->getOperand(0); 2586 } 2587 2588 // Delete all the unnecessary instructions, from latest to earliest. 2589 for (auto *I : InstsToKill) 2590 I->eraseFromParent(); 2591 2592 // Do the fused retain/autorelease if we were asked to. 2593 if (doRetainAutorelease) 2594 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 2595 2596 // Cast back to the result type. 2597 return CGF.Builder.CreateBitCast(result, resultType); 2598 } 2599 2600 /// If this is a +1 of the value of an immutable 'self', remove it. 2601 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 2602 llvm::Value *result) { 2603 // This is only applicable to a method with an immutable 'self'. 2604 const ObjCMethodDecl *method = 2605 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 2606 if (!method) return nullptr; 2607 const VarDecl *self = method->getSelfDecl(); 2608 if (!self->getType().isConstQualified()) return nullptr; 2609 2610 // Look for a retain call. 2611 llvm::CallInst *retainCall = 2612 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 2613 if (!retainCall || 2614 retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain) 2615 return nullptr; 2616 2617 // Look for an ordinary load of 'self'. 
2618 llvm::Value *retainedValue = retainCall->getArgOperand(0); 2619 llvm::LoadInst *load = 2620 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 2621 if (!load || load->isAtomic() || load->isVolatile() || 2622 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) 2623 return nullptr; 2624 2625 // Okay! Burn it all down. This relies for correctness on the 2626 // assumption that the retain is emitted as part of the return and 2627 // that thereafter everything is used "linearly". 2628 llvm::Type *resultType = result->getType(); 2629 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 2630 assert(retainCall->use_empty()); 2631 retainCall->eraseFromParent(); 2632 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 2633 2634 return CGF.Builder.CreateBitCast(load, resultType); 2635 } 2636 2637 /// Emit an ARC autorelease of the result of a function. 2638 /// 2639 /// \return the value to actually return from the function 2640 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 2641 llvm::Value *result) { 2642 // If we're returning 'self', kill the initial retain. This is a 2643 // heuristic attempt to "encourage correctness" in the really unfortunate 2644 // case where we have a return of self during a dealloc and we desperately 2645 // need to avoid the possible autorelease. 2646 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result)) 2647 return self; 2648 2649 // At -O0, try to emit a fused retain/autorelease. 2650 if (CGF.shouldUseFusedARCCalls()) 2651 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result)) 2652 return fused; 2653 2654 return CGF.EmitARCAutoreleaseReturnValue(result); 2655 } 2656 2657 /// Heuristically search for a dominating store to the return-value slot. 2658 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { 2659 // Check if a User is a store which pointerOperand is the ReturnValue. 2660 // We are looking for stores to the ReturnValue, not for stores of the 2661 // ReturnValue to some other location. 2662 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * { 2663 auto *SI = dyn_cast<llvm::StoreInst>(U); 2664 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer()) 2665 return nullptr; 2666 // These aren't actually possible for non-coerced returns, and we 2667 // only care about non-coerced returns on this code path. 2668 assert(!SI->isAtomic() && !SI->isVolatile()); 2669 return SI; 2670 }; 2671 // If there are multiple uses of the return-value slot, just check 2672 // for something immediately preceding the IP. Sometimes this can 2673 // happen with how we generate implicit-returns; it can also happen 2674 // with noreturn cleanups. 
  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return nullptr;
    llvm::Instruction *I = &IP->back();

    // Skip lifetime markers.
    for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
                                            IE = IP->rend();
         II != IE; ++II) {
      if (llvm::IntrinsicInst *Intrinsic =
              dyn_cast<llvm::IntrinsicInst>(&*II)) {
        if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
          const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
          ++II;
          if (II == IE)
            break;
          if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
            continue;
        }
      }
      I = &*II;
      break;
    }

    return GetStoreIfValid(I);
  }

  llvm::StoreInst *store =
      GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
  if (!store) return nullptr;

  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessor chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return nullptr;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         bool EmitRetDbgLoc,
                                         SourceLocation EndLoc) {
  if (FI.isNoReturn()) {
    // Noreturn functions don't return.
    EmitUnreachable(EndLoc);
    return;
  }

  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
    // Naked functions don't have epilogues.
    Builder.CreateUnreachable();
    return;
  }

  // Functions with no result always return void.
  if (!ReturnValue.isValid()) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::InAlloca:
    // Aggregates get evaluated directly into the destination. Sometimes we
    // need to return the sret value in a register, though.
    assert(hasAggregateEvaluationKind(RetTy));
    if (RetAI.getInAllocaSRet()) {
      llvm::Function::arg_iterator EI = CurFn->arg_end();
      --EI;
      llvm::Value *ArgStruct = &*EI;
      llvm::Value *SRet = Builder.CreateStructGEP(
          nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
      RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
    }
    break;

  case ABIArgInfo::Indirect: {
    auto AI = CurFn->arg_begin();
    if (RetAI.isSRetAfterThis())
      ++AI;
    switch (getEvaluationKind(RetTy)) {
    case TEK_Complex: {
      ComplexPairTy RT =
          EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
      EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
                         /*isInit*/ true);
      break;
    }
    case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
2775 break; 2776 case TEK_Scalar: 2777 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), 2778 MakeNaturalAlignAddrLValue(&*AI, RetTy), 2779 /*isInit*/ true); 2780 break; 2781 } 2782 break; 2783 } 2784 2785 case ABIArgInfo::Extend: 2786 case ABIArgInfo::Direct: 2787 if (RetAI.getCoerceToType() == ConvertType(RetTy) && 2788 RetAI.getDirectOffset() == 0) { 2789 // The internal return value temp always will have pointer-to-return-type 2790 // type, just do a load. 2791 2792 // If there is a dominating store to ReturnValue, we can elide 2793 // the load, zap the store, and usually zap the alloca. 2794 if (llvm::StoreInst *SI = 2795 findDominatingStoreToReturnValue(*this)) { 2796 // Reuse the debug location from the store unless there is 2797 // cleanup code to be emitted between the store and return 2798 // instruction. 2799 if (EmitRetDbgLoc && !AutoreleaseResult) 2800 RetDbgLoc = SI->getDebugLoc(); 2801 // Get the stored value and nuke the now-dead store. 2802 RV = SI->getValueOperand(); 2803 SI->eraseFromParent(); 2804 2805 // If that was the only use of the return value, nuke it as well now. 2806 auto returnValueInst = ReturnValue.getPointer(); 2807 if (returnValueInst->use_empty()) { 2808 if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) { 2809 alloca->eraseFromParent(); 2810 ReturnValue = Address::invalid(); 2811 } 2812 } 2813 2814 // Otherwise, we have to do a simple load. 2815 } else { 2816 RV = Builder.CreateLoad(ReturnValue); 2817 } 2818 } else { 2819 // If the value is offset in memory, apply the offset now. 2820 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI); 2821 2822 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this); 2823 } 2824 2825 // In ARC, end functions that return a retainable type with a call 2826 // to objc_autoreleaseReturnValue. 2827 if (AutoreleaseResult) { 2828 #ifndef NDEBUG 2829 // Type::isObjCRetainabletype has to be called on a QualType that hasn't 2830 // been stripped of the typedefs, so we cannot use RetTy here. Get the 2831 // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from 2832 // CurCodeDecl or BlockInfo. 2833 QualType RT; 2834 2835 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl)) 2836 RT = FD->getReturnType(); 2837 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl)) 2838 RT = MD->getReturnType(); 2839 else if (isa<BlockDecl>(CurCodeDecl)) 2840 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType(); 2841 else 2842 llvm_unreachable("Unexpected function/method type"); 2843 2844 assert(getLangOpts().ObjCAutoRefCount && 2845 !FI.isReturnsRetained() && 2846 RT->isObjCRetainableType()); 2847 #endif 2848 RV = emitAutoreleaseOfResult(*this, RV); 2849 } 2850 2851 break; 2852 2853 case ABIArgInfo::Ignore: 2854 break; 2855 2856 case ABIArgInfo::CoerceAndExpand: { 2857 auto coercionType = RetAI.getCoerceAndExpandType(); 2858 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 2859 2860 // Load all of the coerced elements out into results. 
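    // For instance, if the coercion type were { i64, [4 x i8], i32 } with the
    // array element acting as padding, only the i64 and i32 would be loaded;
    // a single surviving element is returned directly, while several are
    // reassembled below into the unpadded type { i64, i32 } via insertvalue.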
2861 llvm::SmallVector<llvm::Value*, 4> results; 2862 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType); 2863 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 2864 auto coercedEltType = coercionType->getElementType(i); 2865 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType)) 2866 continue; 2867 2868 auto eltAddr = Builder.CreateStructGEP(addr, i, layout); 2869 auto elt = Builder.CreateLoad(eltAddr); 2870 results.push_back(elt); 2871 } 2872 2873 // If we have one result, it's the single direct result type. 2874 if (results.size() == 1) { 2875 RV = results[0]; 2876 2877 // Otherwise, we need to make a first-class aggregate. 2878 } else { 2879 // Construct a return type that lacks padding elements. 2880 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType(); 2881 2882 RV = llvm::UndefValue::get(returnType); 2883 for (unsigned i = 0, e = results.size(); i != e; ++i) { 2884 RV = Builder.CreateInsertValue(RV, results[i], i); 2885 } 2886 } 2887 break; 2888 } 2889 2890 case ABIArgInfo::Expand: 2891 llvm_unreachable("Invalid ABI kind for return argument"); 2892 } 2893 2894 llvm::Instruction *Ret; 2895 if (RV) { 2896 EmitReturnValueCheck(RV); 2897 Ret = Builder.CreateRet(RV); 2898 } else { 2899 Ret = Builder.CreateRetVoid(); 2900 } 2901 2902 if (RetDbgLoc) 2903 Ret->setDebugLoc(std::move(RetDbgLoc)); 2904 } 2905 2906 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) { 2907 // A current decl may not be available when emitting vtable thunks. 2908 if (!CurCodeDecl) 2909 return; 2910 2911 ReturnsNonNullAttr *RetNNAttr = nullptr; 2912 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) 2913 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>(); 2914 2915 if (!RetNNAttr && !requiresReturnValueNullabilityCheck()) 2916 return; 2917 2918 // Prefer the returns_nonnull attribute if it's present. 2919 SourceLocation AttrLoc; 2920 SanitizerMask CheckKind; 2921 SanitizerHandler Handler; 2922 if (RetNNAttr) { 2923 assert(!requiresReturnValueNullabilityCheck() && 2924 "Cannot check nullability and the nonnull attribute"); 2925 AttrLoc = RetNNAttr->getLocation(); 2926 CheckKind = SanitizerKind::ReturnsNonnullAttribute; 2927 Handler = SanitizerHandler::NonnullReturn; 2928 } else { 2929 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl)) 2930 if (auto *TSI = DD->getTypeSourceInfo()) 2931 if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>()) 2932 AttrLoc = FTL.getReturnLoc().findNullabilityLoc(); 2933 CheckKind = SanitizerKind::NullabilityReturn; 2934 Handler = SanitizerHandler::NullabilityReturn; 2935 } 2936 2937 SanitizerScope SanScope(this); 2938 2939 // Make sure the "return" source location is valid. If we're checking a 2940 // nullability annotation, make sure the preconditions for the check are met. 2941 llvm::BasicBlock *Check = createBasicBlock("nullcheck"); 2942 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck"); 2943 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load"); 2944 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr); 2945 if (requiresReturnValueNullabilityCheck()) 2946 CanNullCheck = 2947 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition); 2948 Builder.CreateCondBr(CanNullCheck, Check, NoCheck); 2949 EmitBlock(Check); 2950 2951 // Now do the null check. 
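  // If the returned pointer is null at this point, EmitCheck routes to the
  // sanitizer diagnostic selected above (nonnull-return or nullability-return).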
2952 llvm::Value *Cond = Builder.CreateIsNotNull(RV); 2953 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)}; 2954 llvm::Value *DynamicData[] = {SLocPtr}; 2955 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData); 2956 2957 EmitBlock(NoCheck); 2958 2959 #ifndef NDEBUG 2960 // The return location should not be used after the check has been emitted. 2961 ReturnLocation = Address::invalid(); 2962 #endif 2963 } 2964 2965 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 2966 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 2967 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 2968 } 2969 2970 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, 2971 QualType Ty) { 2972 // FIXME: Generate IR in one pass, rather than going back and fixing up these 2973 // placeholders. 2974 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 2975 llvm::Type *IRPtrTy = IRTy->getPointerTo(); 2976 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo()); 2977 2978 // FIXME: When we generate this IR in one pass, we shouldn't need 2979 // this win32-specific alignment hack. 2980 CharUnits Align = CharUnits::fromQuantity(4); 2981 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align); 2982 2983 return AggValueSlot::forAddr(Address(Placeholder, Align), 2984 Ty.getQualifiers(), 2985 AggValueSlot::IsNotDestructed, 2986 AggValueSlot::DoesNotNeedGCBarriers, 2987 AggValueSlot::IsNotAliased); 2988 } 2989 2990 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 2991 const VarDecl *param, 2992 SourceLocation loc) { 2993 // StartFunction converted the ABI-lowered parameter(s) into a 2994 // local alloca. We need to turn that into an r-value suitable 2995 // for EmitCall. 2996 Address local = GetAddrOfLocalVar(param); 2997 2998 QualType type = param->getType(); 2999 3000 assert(!isInAllocaArgument(CGM.getCXXABI(), type) && 3001 "cannot emit delegate call arguments for inalloca arguments!"); 3002 3003 // GetAddrOfLocalVar returns a pointer-to-pointer for references, 3004 // but the argument needs to be the original pointer. 3005 if (type->isReferenceType()) { 3006 args.add(RValue::get(Builder.CreateLoad(local)), type); 3007 3008 // In ARC, move out of consumed arguments so that the release cleanup 3009 // entered by StartFunction doesn't cause an over-release. This isn't 3010 // optimal -O0 code generation, but it should get cleaned up when 3011 // optimization is enabled. This also assumes that delegate calls are 3012 // performed exactly once for a set of arguments, but that should be safe. 3013 } else if (getLangOpts().ObjCAutoRefCount && 3014 param->hasAttr<NSConsumedAttr>() && 3015 type->isObjCRetainableType()) { 3016 llvm::Value *ptr = Builder.CreateLoad(local); 3017 auto null = 3018 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType())); 3019 Builder.CreateStore(null, local); 3020 args.add(RValue::get(ptr), type); 3021 3022 // For the most part, we just need to load the alloca, except that 3023 // aggregate r-values are actually pointers to temporaries. 3024 } else { 3025 args.add(convertTempToRValue(local, type, loc), type); 3026 } 3027 } 3028 3029 static bool isProvablyNull(llvm::Value *addr) { 3030 return isa<llvm::ConstantPointerNull>(addr); 3031 } 3032 3033 /// Emit the actual writing-back of a writeback. 
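/// The classic ObjC ARC example is passing `&err`, where `err` is a __strong
/// local, to a parameter declared `NSError * __autoreleasing *`: the callee
/// actually receives the address of a temporary, and this function copies the
/// temporary's final value back into the original l-value after the call.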
3034 static void emitWriteback(CodeGenFunction &CGF, 3035 const CallArgList::Writeback &writeback) { 3036 const LValue &srcLV = writeback.Source; 3037 Address srcAddr = srcLV.getAddress(); 3038 assert(!isProvablyNull(srcAddr.getPointer()) && 3039 "shouldn't have writeback for provably null argument"); 3040 3041 llvm::BasicBlock *contBB = nullptr; 3042 3043 // If the argument wasn't provably non-null, we need to null check 3044 // before doing the store. 3045 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3046 CGF.CGM.getDataLayout()); 3047 if (!provablyNonNull) { 3048 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 3049 contBB = CGF.createBasicBlock("icr.done"); 3050 3051 llvm::Value *isNull = 3052 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3053 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 3054 CGF.EmitBlock(writebackBB); 3055 } 3056 3057 // Load the value to writeback. 3058 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 3059 3060 // Cast it back, in case we're writing an id to a Foo* or something. 3061 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(), 3062 "icr.writeback-cast"); 3063 3064 // Perform the writeback. 3065 3066 // If we have a "to use" value, it's something we need to emit a use 3067 // of. This has to be carefully threaded in: if it's done after the 3068 // release it's potentially undefined behavior (and the optimizer 3069 // will ignore it), and if it happens before the retain then the 3070 // optimizer could move the release there. 3071 if (writeback.ToUse) { 3072 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 3073 3074 // Retain the new value. No need to block-copy here: the block's 3075 // being passed up the stack. 3076 value = CGF.EmitARCRetainNonBlock(value); 3077 3078 // Emit the intrinsic use here. 3079 CGF.EmitARCIntrinsicUse(writeback.ToUse); 3080 3081 // Load the old value (primitively). 3082 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 3083 3084 // Put the new value in place (primitively). 3085 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 3086 3087 // Release the old value. 3088 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 3089 3090 // Otherwise, we can just do a normal lvalue store. 3091 } else { 3092 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 3093 } 3094 3095 // Jump to the continuation block. 3096 if (!provablyNonNull) 3097 CGF.EmitBlock(contBB); 3098 } 3099 3100 static void emitWritebacks(CodeGenFunction &CGF, 3101 const CallArgList &args) { 3102 for (const auto &I : args.writebacks()) 3103 emitWriteback(CGF, I); 3104 } 3105 3106 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, 3107 const CallArgList &CallArgs) { 3108 ArrayRef<CallArgList::CallArgCleanup> Cleanups = 3109 CallArgs.getCleanupsToDeactivate(); 3110 // Iterate in reverse to increase the likelihood of popping the cleanup. 3111 for (const auto &I : llvm::reverse(Cleanups)) { 3112 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP); 3113 I.IsActiveIP->eraseFromParent(); 3114 } 3115 } 3116 3117 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { 3118 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) 3119 if (uop->getOpcode() == UO_AddrOf) 3120 return uop->getSubExpr(); 3121 return nullptr; 3122 } 3123 3124 /// Emit an argument that's being passed call-by-writeback. 
That is, 3125 /// we are passing the address of an __autoreleased temporary; it 3126 /// might be copy-initialized with the current value of the given 3127 /// address, but it will definitely be copied out of after the call. 3128 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, 3129 const ObjCIndirectCopyRestoreExpr *CRE) { 3130 LValue srcLV; 3131 3132 // Make an optimistic effort to emit the address as an l-value. 3133 // This can fail if the argument expression is more complicated. 3134 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { 3135 srcLV = CGF.EmitLValue(lvExpr); 3136 3137 // Otherwise, just emit it as a scalar. 3138 } else { 3139 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); 3140 3141 QualType srcAddrType = 3142 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); 3143 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); 3144 } 3145 Address srcAddr = srcLV.getAddress(); 3146 3147 // The dest and src types don't necessarily match in LLVM terms 3148 // because of the crazy ObjC compatibility rules. 3149 3150 llvm::PointerType *destType = 3151 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); 3152 3153 // If the address is a constant null, just pass the appropriate null. 3154 if (isProvablyNull(srcAddr.getPointer())) { 3155 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), 3156 CRE->getType()); 3157 return; 3158 } 3159 3160 // Create the temporary. 3161 Address temp = CGF.CreateTempAlloca(destType->getElementType(), 3162 CGF.getPointerAlign(), 3163 "icr.temp"); 3164 // Loading an l-value can introduce a cleanup if the l-value is __weak, 3165 // and that cleanup will be conditional if we can't prove that the l-value 3166 // isn't null, so we need to register a dominating point so that the cleanups 3167 // system will make valid IR. 3168 CodeGenFunction::ConditionalEvaluation condEval(CGF); 3169 3170 // Zero-initialize it if we're not doing a copy-initialization. 3171 bool shouldCopy = CRE->shouldCopy(); 3172 if (!shouldCopy) { 3173 llvm::Value *null = 3174 llvm::ConstantPointerNull::get( 3175 cast<llvm::PointerType>(destType->getElementType())); 3176 CGF.Builder.CreateStore(null, temp); 3177 } 3178 3179 llvm::BasicBlock *contBB = nullptr; 3180 llvm::BasicBlock *originBB = nullptr; 3181 3182 // If the address is *not* known to be non-null, we need to switch. 3183 llvm::Value *finalArgument; 3184 3185 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3186 CGF.CGM.getDataLayout()); 3187 if (provablyNonNull) { 3188 finalArgument = temp.getPointer(); 3189 } else { 3190 llvm::Value *isNull = 3191 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3192 3193 finalArgument = CGF.Builder.CreateSelect(isNull, 3194 llvm::ConstantPointerNull::get(destType), 3195 temp.getPointer(), "icr.argument"); 3196 3197 // If we need to copy, then the load has to be conditional, which 3198 // means we need control flow. 3199 if (shouldCopy) { 3200 originBB = CGF.Builder.GetInsertBlock(); 3201 contBB = CGF.createBasicBlock("icr.cont"); 3202 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 3203 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 3204 CGF.EmitBlock(copyBB); 3205 condEval.begin(CGF); 3206 } 3207 } 3208 3209 llvm::Value *valueToUse = nullptr; 3210 3211 // Perform a copy if necessary. 
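  // Conceptually (a hypothetical sketch of the copy-in/copy-out, not the
  // emitted IR):
  //   id tmp = *src;      // only when the argument is copy-initialized
  //   callee(&tmp);       // callee may overwrite tmp
  //   *src = tmp;         // writeback emitted later by emitWriteback()
  // The load below is only reached on the non-null path when control flow
  // was needed above.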
3212 if (shouldCopy) { 3213 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 3214 assert(srcRV.isScalar()); 3215 3216 llvm::Value *src = srcRV.getScalarVal(); 3217 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 3218 "icr.cast"); 3219 3220 // Use an ordinary store, not a store-to-lvalue. 3221 CGF.Builder.CreateStore(src, temp); 3222 3223 // If optimization is enabled, and the value was held in a 3224 // __strong variable, we need to tell the optimizer that this 3225 // value has to stay alive until we're doing the store back. 3226 // This is because the temporary is effectively unretained, 3227 // and so otherwise we can violate the high-level semantics. 3228 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 3229 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 3230 valueToUse = src; 3231 } 3232 } 3233 3234 // Finish the control flow if we needed it. 3235 if (shouldCopy && !provablyNonNull) { 3236 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 3237 CGF.EmitBlock(contBB); 3238 3239 // Make a phi for the value to intrinsically use. 3240 if (valueToUse) { 3241 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 3242 "icr.to-use"); 3243 phiToUse->addIncoming(valueToUse, copyBB); 3244 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 3245 originBB); 3246 valueToUse = phiToUse; 3247 } 3248 3249 condEval.end(CGF); 3250 } 3251 3252 args.addWriteback(srcLV, temp, valueToUse); 3253 args.add(RValue::get(finalArgument), CRE->getType()); 3254 } 3255 3256 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 3257 assert(!StackBase); 3258 3259 // Save the stack. 3260 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 3261 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); 3262 } 3263 3264 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 3265 if (StackBase) { 3266 // Restore the stack after the call. 3267 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 3268 CGF.Builder.CreateCall(F, StackBase); 3269 } 3270 } 3271 3272 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, 3273 SourceLocation ArgLoc, 3274 AbstractCallee AC, 3275 unsigned ParmNum) { 3276 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || 3277 SanOpts.has(SanitizerKind::NullabilityArg))) 3278 return; 3279 3280 // The param decl may be missing in a variadic function. 3281 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr; 3282 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; 3283 3284 // Prefer the nonnull attribute if it's present. 
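  // Two source forms can lead here (illustrative declarations, not from the
  // original source):
  //   void f(int *p) __attribute__((nonnull));   // -> NonnullAttribute check
  //   void g(int *_Nonnull p);                   // -> NullabilityArg check
  // When both could apply, the attribute is preferred below.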
3285 const NonNullAttr *NNAttr = nullptr; 3286 if (SanOpts.has(SanitizerKind::NonnullAttribute)) 3287 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo); 3288 3289 bool CanCheckNullability = false; 3290 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) { 3291 auto Nullability = PVD->getType()->getNullability(getContext()); 3292 CanCheckNullability = Nullability && 3293 *Nullability == NullabilityKind::NonNull && 3294 PVD->getTypeSourceInfo(); 3295 } 3296 3297 if (!NNAttr && !CanCheckNullability) 3298 return; 3299 3300 SourceLocation AttrLoc; 3301 SanitizerMask CheckKind; 3302 SanitizerHandler Handler; 3303 if (NNAttr) { 3304 AttrLoc = NNAttr->getLocation(); 3305 CheckKind = SanitizerKind::NonnullAttribute; 3306 Handler = SanitizerHandler::NonnullArg; 3307 } else { 3308 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc(); 3309 CheckKind = SanitizerKind::NullabilityArg; 3310 Handler = SanitizerHandler::NullabilityArg; 3311 } 3312 3313 SanitizerScope SanScope(this); 3314 assert(RV.isScalar()); 3315 llvm::Value *V = RV.getScalarVal(); 3316 llvm::Value *Cond = 3317 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType())); 3318 llvm::Constant *StaticData[] = { 3319 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc), 3320 llvm::ConstantInt::get(Int32Ty, ArgNo + 1), 3321 }; 3322 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None); 3323 } 3324 3325 void CodeGenFunction::EmitCallArgs( 3326 CallArgList &Args, ArrayRef<QualType> ArgTypes, 3327 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, 3328 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { 3329 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); 3330 3331 // We *have* to evaluate arguments from right to left in the MS C++ ABI, 3332 // because arguments are destroyed left to right in the callee. As a special 3333 // case, there are certain language constructs that require left-to-right 3334 // evaluation, and in those cases we consider the evaluation order requirement 3335 // to trump the "destruction order is reverse construction order" guarantee. 3336 bool LeftToRight = 3337 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee() 3338 ? Order == EvaluationOrder::ForceLeftToRight 3339 : Order != EvaluationOrder::ForceRightToLeft; 3340 3341 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg, 3342 RValue EmittedArg) { 3343 if (!AC.hasFunctionDecl() || I >= AC.getNumParams()) 3344 return; 3345 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>(); 3346 if (PS == nullptr) 3347 return; 3348 3349 const auto &Context = getContext(); 3350 auto SizeTy = Context.getSizeType(); 3351 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); 3352 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?"); 3353 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T, 3354 EmittedArg.getScalarVal()); 3355 Args.add(RValue::get(V), SizeTy); 3356 // If we're emitting args in reverse, be sure to do so with 3357 // pass_object_size, as well. 3358 if (!LeftToRight) 3359 std::swap(Args.back(), *(&Args.back() - 1)); 3360 }; 3361 3362 // Insert a stack save if we're going to need any inalloca args. 
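  // Sketch of the mechanism: inalloca arguments are carved out of a block
  // allocated with alloca at the call site, so the call is bracketed with
  // llvm.stacksave / llvm.stackrestore (see allocateArgumentMemory and
  // freeArgumentMemory above) to reclaim that memory afterwards.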
3363 bool HasInAllocaArgs = false; 3364 if (CGM.getTarget().getCXXABI().isMicrosoft()) { 3365 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end(); 3366 I != E && !HasInAllocaArgs; ++I) 3367 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I); 3368 if (HasInAllocaArgs) { 3369 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 3370 Args.allocateArgumentMemory(*this); 3371 } 3372 } 3373 3374 // Evaluate each argument in the appropriate order. 3375 size_t CallArgsStart = Args.size(); 3376 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { 3377 unsigned Idx = LeftToRight ? I : E - I - 1; 3378 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx; 3379 unsigned InitialArgSize = Args.size(); 3380 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of 3381 // the argument and parameter match or the objc method is parameterized. 3382 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) || 3383 getContext().hasSameUnqualifiedType((*Arg)->getType(), 3384 ArgTypes[Idx]) || 3385 (isa<ObjCMethodDecl>(AC.getDecl()) && 3386 isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) && 3387 "Argument and parameter types don't match"); 3388 EmitCallArg(Args, *Arg, ArgTypes[Idx]); 3389 // In particular, we depend on it being the last arg in Args, and the 3390 // objectsize bits depend on there only being one arg if !LeftToRight. 3391 assert(InitialArgSize + 1 == Args.size() && 3392 "The code below depends on only adding one arg per EmitCallArg"); 3393 (void)InitialArgSize; 3394 RValue RVArg = Args.back().RV; 3395 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC, 3396 ParamsToSkip + Idx); 3397 // @llvm.objectsize should never have side-effects and shouldn't need 3398 // destruction/cleanups, so we can safely "emit" it after its arg, 3399 // regardless of right-to-leftness 3400 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg); 3401 } 3402 3403 if (!LeftToRight) { 3404 // Un-reverse the arguments we just evaluated so they match up with the LLVM 3405 // IR function. 
3406 std::reverse(Args.begin() + CallArgsStart, Args.end()); 3407 } 3408 } 3409 3410 namespace { 3411 3412 struct DestroyUnpassedArg final : EHScopeStack::Cleanup { 3413 DestroyUnpassedArg(Address Addr, QualType Ty) 3414 : Addr(Addr), Ty(Ty) {} 3415 3416 Address Addr; 3417 QualType Ty; 3418 3419 void Emit(CodeGenFunction &CGF, Flags flags) override { 3420 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 3421 assert(!Dtor->isTrivial()); 3422 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 3423 /*Delegating=*/false, Addr); 3424 } 3425 }; 3426 3427 struct DisableDebugLocationUpdates { 3428 CodeGenFunction &CGF; 3429 bool disabledDebugInfo; 3430 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { 3431 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) 3432 CGF.disableDebugInfo(); 3433 } 3434 ~DisableDebugLocationUpdates() { 3435 if (disabledDebugInfo) 3436 CGF.enableDebugInfo(); 3437 } 3438 }; 3439 3440 } // end anonymous namespace 3441 3442 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 3443 QualType type) { 3444 DisableDebugLocationUpdates Dis(*this, E); 3445 if (const ObjCIndirectCopyRestoreExpr *CRE 3446 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 3447 assert(getLangOpts().ObjCAutoRefCount); 3448 return emitWritebackArg(*this, args, CRE); 3449 } 3450 3451 assert(type->isReferenceType() == E->isGLValue() && 3452 "reference binding to unmaterialized r-value!"); 3453 3454 if (E->isGLValue()) { 3455 assert(E->getObjectKind() == OK_Ordinary); 3456 return args.add(EmitReferenceBindingToExpr(E), type); 3457 } 3458 3459 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 3460 3461 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 3462 // However, we still have to push an EH-only cleanup in case we unwind before 3463 // we make it to the call. 3464 if (HasAggregateEvalKind && getContext().isParamDestroyedInCallee(type)) { 3465 // If we're using inalloca, use the argument memory. Otherwise, use a 3466 // temporary. 3467 AggValueSlot Slot; 3468 if (args.isUsingInAlloca()) 3469 Slot = createPlaceholderSlot(*this, type); 3470 else 3471 Slot = CreateAggTemp(type, "agg.tmp"); 3472 3473 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 3474 bool DestroyedInCallee = 3475 RD && RD->hasNonTrivialDestructor() && 3476 (CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default || 3477 RD->hasTrivialABIOverride()); 3478 if (DestroyedInCallee) 3479 Slot.setExternallyDestructed(); 3480 3481 EmitAggExpr(E, Slot); 3482 RValue RV = Slot.asRValue(); 3483 args.add(RV, type); 3484 3485 if (DestroyedInCallee) { 3486 // Create a no-op GEP between the placeholder and the cleanup so we can 3487 // RAUW it successfully. It also serves as a marker of the first 3488 // instruction where the cleanup is active. 3489 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(), 3490 type); 3491 // This unreachable is a temporary marker which will be removed later. 
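      // Lifecycle sketch (based on the surrounding code): the 'unreachable'
      // only marks the instruction at which the cleanup becomes active;
      // deactivateArgCleanupsBeforeCall() later deactivates the cleanup at
      // that point and erases this marker before the call itself is emitted.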
3492 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 3493 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 3494 } 3495 return; 3496 } 3497 3498 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 3499 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 3500 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 3501 assert(L.isSimple()); 3502 if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) { 3503 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true); 3504 } else { 3505 // We can't represent a misaligned lvalue in the CallArgList, so copy 3506 // to an aligned temporary now. 3507 LValue Dest = MakeAddrLValue(CreateMemTemp(type), type); 3508 EmitAggregateCopy(Dest, L, type, L.isVolatile()); 3509 args.add(RValue::getAggregate(Dest.getAddress()), type); 3510 } 3511 return; 3512 } 3513 3514 args.add(EmitAnyExprToTemp(E), type); 3515 } 3516 3517 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 3518 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 3519 // implicitly widens null pointer constants that are arguments to varargs 3520 // functions to pointer-sized ints. 3521 if (!getTarget().getTriple().isOSWindows()) 3522 return Arg->getType(); 3523 3524 if (Arg->getType()->isIntegerType() && 3525 getContext().getTypeSize(Arg->getType()) < 3526 getContext().getTargetInfo().getPointerWidth(0) && 3527 Arg->isNullPointerConstant(getContext(), 3528 Expr::NPC_ValueDependentIsNotNull)) { 3529 return getContext().getIntPtrType(); 3530 } 3531 3532 return Arg->getType(); 3533 } 3534 3535 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3536 // optimizer it can aggressively ignore unwind edges. 3537 void 3538 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 3539 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 3540 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 3541 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 3542 CGM.getNoObjCARCExceptionsMetadata()); 3543 } 3544 3545 /// Emits a call to the given no-arguments nounwind runtime function. 3546 llvm::CallInst * 3547 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 3548 const llvm::Twine &name) { 3549 return EmitNounwindRuntimeCall(callee, None, name); 3550 } 3551 3552 /// Emits a call to the given nounwind runtime function. 3553 llvm::CallInst * 3554 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 3555 ArrayRef<llvm::Value*> args, 3556 const llvm::Twine &name) { 3557 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 3558 call->setDoesNotThrow(); 3559 return call; 3560 } 3561 3562 /// Emits a simple call (never an invoke) to the given no-arguments 3563 /// runtime function. 3564 llvm::CallInst * 3565 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 3566 const llvm::Twine &name) { 3567 return EmitRuntimeCall(callee, None, name); 3568 } 3569 3570 // Calls which may throw must have operand bundles indicating which funclet 3571 // they are nested within. 3572 SmallVector<llvm::OperandBundleDef, 1> 3573 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) { 3574 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3575 // There is no need for a funclet operand bundle if we aren't inside a 3576 // funclet. 3577 if (!CurrentFuncletPad) 3578 return BundleList; 3579 3580 // Skip intrinsics which cannot throw. 
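  // For example (illustrative): a nounwind intrinsic call such as
  //   call void @llvm.lifetime.start.p0i8(i64 8, i8* %p)
  // inside a cleanup funclet gets no bundle, while an ordinary call that may
  // throw is tagged with a "funclet" bundle naming CurrentFuncletPad.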
3581 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts()); 3582 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) 3583 return BundleList; 3584 3585 BundleList.emplace_back("funclet", CurrentFuncletPad); 3586 return BundleList; 3587 } 3588 3589 /// Emits a simple call (never an invoke) to the given runtime function. 3590 llvm::CallInst * 3591 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 3592 ArrayRef<llvm::Value*> args, 3593 const llvm::Twine &name) { 3594 llvm::CallInst *call = 3595 Builder.CreateCall(callee, args, getBundlesForFunclet(callee), name); 3596 call->setCallingConv(getRuntimeCC()); 3597 return call; 3598 } 3599 3600 /// Emits a call or invoke to the given noreturn runtime function. 3601 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, 3602 ArrayRef<llvm::Value*> args) { 3603 SmallVector<llvm::OperandBundleDef, 1> BundleList = 3604 getBundlesForFunclet(callee); 3605 3606 if (getInvokeDest()) { 3607 llvm::InvokeInst *invoke = 3608 Builder.CreateInvoke(callee, 3609 getUnreachableBlock(), 3610 getInvokeDest(), 3611 args, 3612 BundleList); 3613 invoke->setDoesNotReturn(); 3614 invoke->setCallingConv(getRuntimeCC()); 3615 } else { 3616 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); 3617 call->setDoesNotReturn(); 3618 call->setCallingConv(getRuntimeCC()); 3619 Builder.CreateUnreachable(); 3620 } 3621 } 3622 3623 /// Emits a call or invoke instruction to the given nullary runtime function. 3624 llvm::CallSite 3625 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 3626 const Twine &name) { 3627 return EmitRuntimeCallOrInvoke(callee, None, name); 3628 } 3629 3630 /// Emits a call or invoke instruction to the given runtime function. 3631 llvm::CallSite 3632 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 3633 ArrayRef<llvm::Value*> args, 3634 const Twine &name) { 3635 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name); 3636 callSite.setCallingConv(getRuntimeCC()); 3637 return callSite; 3638 } 3639 3640 /// Emits a call or invoke instruction to the given function, depending 3641 /// on the current state of the EH stack. 3642 llvm::CallSite 3643 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 3644 ArrayRef<llvm::Value *> Args, 3645 const Twine &Name) { 3646 llvm::BasicBlock *InvokeDest = getInvokeDest(); 3647 SmallVector<llvm::OperandBundleDef, 1> BundleList = 3648 getBundlesForFunclet(Callee); 3649 3650 llvm::Instruction *Inst; 3651 if (!InvokeDest) 3652 Inst = Builder.CreateCall(Callee, Args, BundleList, Name); 3653 else { 3654 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 3655 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList, 3656 Name); 3657 EmitBlock(ContBB); 3658 } 3659 3660 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3661 // optimizer it can aggressively ignore unwind edges. 3662 if (CGM.getLangOpts().ObjCAutoRefCount) 3663 AddObjCARCExceptionMetadata(Inst); 3664 3665 return llvm::CallSite(Inst); 3666 } 3667 3668 /// \brief Store a non-aggregate value to an address to initialize it. For 3669 /// initialization, a non-atomic store will be used. 
3670 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, 3671 LValue Dst) { 3672 if (Src.isScalar()) 3673 CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true); 3674 else 3675 CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true); 3676 } 3677 3678 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, 3679 llvm::Value *New) { 3680 DeferredReplacements.push_back(std::make_pair(Old, New)); 3681 } 3682 3683 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 3684 const CGCallee &Callee, 3685 ReturnValueSlot ReturnValue, 3686 const CallArgList &CallArgs, 3687 llvm::Instruction **callOrInvoke, 3688 SourceLocation Loc) { 3689 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 3690 3691 assert(Callee.isOrdinary() || Callee.isVirtual()); 3692 3693 // Handle struct-return functions by passing a pointer to the 3694 // location that we would like to return into. 3695 QualType RetTy = CallInfo.getReturnType(); 3696 const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); 3697 3698 llvm::FunctionType *IRFuncTy = Callee.getFunctionType(); 3699 3700 // 1. Set up the arguments. 3701 3702 // If we're using inalloca, insert the allocation after the stack save. 3703 // FIXME: Do this earlier rather than hacking it in here! 3704 Address ArgMemory = Address::invalid(); 3705 const llvm::StructLayout *ArgMemoryLayout = nullptr; 3706 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { 3707 const llvm::DataLayout &DL = CGM.getDataLayout(); 3708 ArgMemoryLayout = DL.getStructLayout(ArgStruct); 3709 llvm::Instruction *IP = CallArgs.getStackBase(); 3710 llvm::AllocaInst *AI; 3711 if (IP) { 3712 IP = IP->getNextNode(); 3713 AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(), 3714 "argmem", IP); 3715 } else { 3716 AI = CreateTempAlloca(ArgStruct, "argmem"); 3717 } 3718 auto Align = CallInfo.getArgStructAlignment(); 3719 AI->setAlignment(Align.getQuantity()); 3720 AI->setUsedWithInAlloca(true); 3721 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); 3722 ArgMemory = Address(AI, Align); 3723 } 3724 3725 // Helper function to drill into the inalloca allocation. 3726 auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address { 3727 auto FieldOffset = 3728 CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex)); 3729 return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset); 3730 }; 3731 3732 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo); 3733 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs()); 3734 3735 // If the call returns a temporary with struct return, create a temporary 3736 // alloca to hold the result, unless one is given to us. 
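  // Illustrative case (hypothetical types): a call returning a large struct,
  //   struct Big { int a[16]; };  Big make();
  // is typically lowered as 'void make(%struct.Big* sret)', so the callee
  // needs somewhere to store into even when the caller ignores the result.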
3737   Address SRetPtr = Address::invalid();
3738   size_t UnusedReturnSize = 0;
3739   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3740     if (!ReturnValue.isNull()) {
3741       SRetPtr = ReturnValue.getValue();
3742     } else {
3743       SRetPtr = CreateMemTemp(RetTy);
3744       if (HaveInsertPoint() && ReturnValue.isUnused()) {
3745         uint64_t size =
3746             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3747         if (EmitLifetimeStart(size, SRetPtr.getPointer()))
3748           UnusedReturnSize = size;
3749       }
3750     }
3751     if (IRFunctionArgs.hasSRetArg()) {
3752       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3753     } else if (RetAI.isInAlloca()) {
3754       Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3755       Builder.CreateStore(SRetPtr.getPointer(), Addr);
3756     }
3757   }
3758 
3759   Address swiftErrorTemp = Address::invalid();
3760   Address swiftErrorArg = Address::invalid();
3761 
3762   // Translate all of the arguments as necessary to match the IR lowering.
3763   assert(CallInfo.arg_size() == CallArgs.size() &&
3764          "Mismatch between function signature & arguments.");
3765   unsigned ArgNo = 0;
3766   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3767   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3768        I != E; ++I, ++info_it, ++ArgNo) {
3769     const ABIArgInfo &ArgInfo = info_it->info;
3770     RValue RV = I->RV;
3771 
3772     // Insert a padding argument to ensure proper alignment.
3773     if (IRFunctionArgs.hasPaddingArg(ArgNo))
3774       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3775           llvm::UndefValue::get(ArgInfo.getPaddingType());
3776 
3777     unsigned FirstIRArg, NumIRArgs;
3778     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3779 
3780     switch (ArgInfo.getKind()) {
3781     case ABIArgInfo::InAlloca: {
3782       assert(NumIRArgs == 0);
3783       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3784       if (RV.isAggregate()) {
3785         // Replace the placeholder with the appropriate argument slot GEP.
3786         llvm::Instruction *Placeholder =
3787             cast<llvm::Instruction>(RV.getAggregatePointer());
3788         CGBuilderTy::InsertPoint IP = Builder.saveIP();
3789         Builder.SetInsertPoint(Placeholder);
3790         Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3791         Builder.restoreIP(IP);
3792         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3793       } else {
3794         // Store the RValue into the argument struct.
3795         Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3796         unsigned AS = Addr.getType()->getPointerAddressSpace();
3797         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3798         // There are some cases where a trivial bitcast is not avoidable. The
3799         // definition of a type later in a translation unit may change its type
3800         // from {}* to (%struct.foo*)*.
3801         if (Addr.getType() != MemType)
3802           Addr = Builder.CreateBitCast(Addr, MemType);
3803         LValue argLV = MakeAddrLValue(Addr, I->Ty);
3804         EmitInitStoreOfNonAggregate(*this, RV, argLV);
3805       }
3806       break;
3807     }
3808 
3809     case ABIArgInfo::Indirect: {
3810       assert(NumIRArgs == 1);
3811       if (RV.isScalar() || RV.isComplex()) {
3812         // Make a temporary alloca to pass the argument.
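        // Sketch: a scalar or complex value whose ABI says it must be passed
        // in memory (e.g. an over-wide vector on some targets) is spilled to
        // a fresh temporary here, and the temporary's address becomes the IR
        // argument.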
3813 Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(), 3814 "indirect-arg-temp", false); 3815 IRCallArgs[FirstIRArg] = Addr.getPointer(); 3816 3817 LValue argLV = MakeAddrLValue(Addr, I->Ty); 3818 EmitInitStoreOfNonAggregate(*this, RV, argLV); 3819 } else { 3820 // We want to avoid creating an unnecessary temporary+copy here; 3821 // however, we need one in three cases: 3822 // 1. If the argument is not byval, and we are required to copy the 3823 // source. (This case doesn't occur on any common architecture.) 3824 // 2. If the argument is byval, RV is not sufficiently aligned, and 3825 // we cannot force it to be sufficiently aligned. 3826 // 3. If the argument is byval, but RV is located in an address space 3827 // different than that of the argument (0). 3828 Address Addr = RV.getAggregateAddress(); 3829 CharUnits Align = ArgInfo.getIndirectAlign(); 3830 const llvm::DataLayout *TD = &CGM.getDataLayout(); 3831 const unsigned RVAddrSpace = Addr.getType()->getAddressSpace(); 3832 const unsigned ArgAddrSpace = 3833 (FirstIRArg < IRFuncTy->getNumParams() 3834 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() 3835 : 0); 3836 if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) || 3837 (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align && 3838 llvm::getOrEnforceKnownAlignment(Addr.getPointer(), 3839 Align.getQuantity(), *TD) 3840 < Align.getQuantity()) || 3841 (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) { 3842 // Create an aligned temporary, and copy to it. 3843 Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(), 3844 "byval-temp", false); 3845 IRCallArgs[FirstIRArg] = AI.getPointer(); 3846 LValue Dest = MakeAddrLValue(AI, I->Ty); 3847 LValue Src = MakeAddrLValue(Addr, I->Ty); 3848 EmitAggregateCopy(Dest, Src, I->Ty, RV.isVolatileQualified()); 3849 } else { 3850 // Skip the extra memcpy call. 3851 IRCallArgs[FirstIRArg] = Addr.getPointer(); 3852 } 3853 } 3854 break; 3855 } 3856 3857 case ABIArgInfo::Ignore: 3858 assert(NumIRArgs == 0); 3859 break; 3860 3861 case ABIArgInfo::Extend: 3862 case ABIArgInfo::Direct: { 3863 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) && 3864 ArgInfo.getCoerceToType() == ConvertType(info_it->type) && 3865 ArgInfo.getDirectOffset() == 0) { 3866 assert(NumIRArgs == 1); 3867 llvm::Value *V; 3868 if (RV.isScalar()) 3869 V = RV.getScalarVal(); 3870 else 3871 V = Builder.CreateLoad(RV.getAggregateAddress()); 3872 3873 // Implement swifterror by copying into a new swifterror argument. 3874 // We'll write back in the normal path out of the call. 3875 if (CallInfo.getExtParameterInfo(ArgNo).getABI() 3876 == ParameterABI::SwiftErrorResult) { 3877 assert(!swiftErrorTemp.isValid() && "multiple swifterror args"); 3878 3879 QualType pointeeTy = I->Ty->getPointeeType(); 3880 swiftErrorArg = 3881 Address(V, getContext().getTypeAlignInChars(pointeeTy)); 3882 3883 swiftErrorTemp = 3884 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 3885 V = swiftErrorTemp.getPointer(); 3886 cast<llvm::AllocaInst>(V)->setSwiftError(true); 3887 3888 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg); 3889 Builder.CreateStore(errorValue, swiftErrorTemp); 3890 } 3891 3892 // We might have to widen integers, but we should never truncate. 3893 if (ArgInfo.getCoerceToType() != V->getType() && 3894 V->getType()->isIntegerTy()) 3895 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType()); 3896 3897 // If the argument doesn't match, perform a bitcast to coerce it. This 3898 // can happen due to trivial type mismatches. 
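        // Typical trivial mismatch (illustrative): the same struct lowered as
        // an opaque '{}*' in one declaration and as '%struct.Foo*' in another
        // (compare the comment in the inalloca case above); the bitcast below
        // just reconciles the pointer types.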
3899 if (FirstIRArg < IRFuncTy->getNumParams() && 3900 V->getType() != IRFuncTy->getParamType(FirstIRArg)) 3901 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); 3902 3903 IRCallArgs[FirstIRArg] = V; 3904 break; 3905 } 3906 3907 // FIXME: Avoid the conversion through memory if possible. 3908 Address Src = Address::invalid(); 3909 if (RV.isScalar() || RV.isComplex()) { 3910 Src = CreateMemTemp(I->Ty, "coerce"); 3911 LValue SrcLV = MakeAddrLValue(Src, I->Ty); 3912 EmitInitStoreOfNonAggregate(*this, RV, SrcLV); 3913 } else { 3914 Src = RV.getAggregateAddress(); 3915 } 3916 3917 // If the value is offset in memory, apply the offset now. 3918 Src = emitAddressAtOffset(*this, Src, ArgInfo); 3919 3920 // Fast-isel and the optimizer generally like scalar values better than 3921 // FCAs, so we flatten them if this is safe to do for this argument. 3922 llvm::StructType *STy = 3923 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType()); 3924 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 3925 llvm::Type *SrcTy = Src.getType()->getElementType(); 3926 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); 3927 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); 3928 3929 // If the source type is smaller than the destination type of the 3930 // coerce-to logic, copy the source value into a temp alloca the size 3931 // of the destination type to allow loading all of it. The bits past 3932 // the source value are left undef. 3933 if (SrcSize < DstSize) { 3934 Address TempAlloca 3935 = CreateTempAlloca(STy, Src.getAlignment(), 3936 Src.getName() + ".coerce"); 3937 Builder.CreateMemCpy(TempAlloca, Src, SrcSize); 3938 Src = TempAlloca; 3939 } else { 3940 Src = Builder.CreateBitCast(Src, 3941 STy->getPointerTo(Src.getAddressSpace())); 3942 } 3943 3944 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy); 3945 assert(NumIRArgs == STy->getNumElements()); 3946 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 3947 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i)); 3948 Address EltPtr = Builder.CreateStructGEP(Src, i, Offset); 3949 llvm::Value *LI = Builder.CreateLoad(EltPtr); 3950 IRCallArgs[FirstIRArg + i] = LI; 3951 } 3952 } else { 3953 // In the simple case, just pass the coerced loaded value. 3954 assert(NumIRArgs == 1); 3955 IRCallArgs[FirstIRArg] = 3956 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this); 3957 } 3958 3959 break; 3960 } 3961 3962 case ABIArgInfo::CoerceAndExpand: { 3963 auto coercionType = ArgInfo.getCoerceAndExpandType(); 3964 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 3965 3966 llvm::Value *tempSize = nullptr; 3967 Address addr = Address::invalid(); 3968 if (RV.isAggregate()) { 3969 addr = RV.getAggregateAddress(); 3970 } else { 3971 assert(RV.isScalar()); // complex should always just be direct 3972 3973 llvm::Type *scalarType = RV.getScalarVal()->getType(); 3974 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType); 3975 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType); 3976 3977 tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize); 3978 3979 // Materialize to a temporary. 
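        // Sketch: a scalar with a coerce-and-expand ABI (used, e.g., by the
        // Swift calling convention to split a value across several registers)
        // is first spilled to a temporary so the per-element loads below can
        // pick the pieces back out of memory.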
3980 addr = CreateTempAlloca(RV.getScalarVal()->getType(), 3981 CharUnits::fromQuantity(std::max(layout->getAlignment(), 3982 scalarAlign))); 3983 EmitLifetimeStart(scalarSize, addr.getPointer()); 3984 3985 Builder.CreateStore(RV.getScalarVal(), addr); 3986 } 3987 3988 addr = Builder.CreateElementBitCast(addr, coercionType); 3989 3990 unsigned IRArgPos = FirstIRArg; 3991 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 3992 llvm::Type *eltType = coercionType->getElementType(i); 3993 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; 3994 Address eltAddr = Builder.CreateStructGEP(addr, i, layout); 3995 llvm::Value *elt = Builder.CreateLoad(eltAddr); 3996 IRCallArgs[IRArgPos++] = elt; 3997 } 3998 assert(IRArgPos == FirstIRArg + NumIRArgs); 3999 4000 if (tempSize) { 4001 EmitLifetimeEnd(tempSize, addr.getPointer()); 4002 } 4003 4004 break; 4005 } 4006 4007 case ABIArgInfo::Expand: 4008 unsigned IRArgPos = FirstIRArg; 4009 ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos); 4010 assert(IRArgPos == FirstIRArg + NumIRArgs); 4011 break; 4012 } 4013 } 4014 4015 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this); 4016 llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer(); 4017 4018 // If we're using inalloca, set up that argument. 4019 if (ArgMemory.isValid()) { 4020 llvm::Value *Arg = ArgMemory.getPointer(); 4021 if (CallInfo.isVariadic()) { 4022 // When passing non-POD arguments by value to variadic functions, we will 4023 // end up with a variadic prototype and an inalloca call site. In such 4024 // cases, we can't do any parameter mismatch checks. Give up and bitcast 4025 // the callee. 4026 unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace(); 4027 auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS); 4028 CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy); 4029 } else { 4030 llvm::Type *LastParamTy = 4031 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1); 4032 if (Arg->getType() != LastParamTy) { 4033 #ifndef NDEBUG 4034 // Assert that these structs have equivalent element types. 4035 llvm::StructType *FullTy = CallInfo.getArgStruct(); 4036 llvm::StructType *DeclaredTy = cast<llvm::StructType>( 4037 cast<llvm::PointerType>(LastParamTy)->getElementType()); 4038 assert(DeclaredTy->getNumElements() == FullTy->getNumElements()); 4039 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(), 4040 DE = DeclaredTy->element_end(), 4041 FI = FullTy->element_begin(); 4042 DI != DE; ++DI, ++FI) 4043 assert(*DI == *FI); 4044 #endif 4045 Arg = Builder.CreateBitCast(Arg, LastParamTy); 4046 } 4047 } 4048 assert(IRFunctionArgs.hasInallocaArg()); 4049 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg; 4050 } 4051 4052 // 2. Prepare the function pointer. 4053 4054 // If the callee is a bitcast of a non-variadic function to have a 4055 // variadic function pointer type, check to see if we can remove the 4056 // bitcast. This comes up with unprototyped functions. 4057 // 4058 // This makes the IR nicer, but more importantly it ensures that we 4059 // can inline the function at -O0 if it is marked always_inline. 
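  // Illustrative C pattern being undone (assuming the definition follows the
  // call in the translation unit):
  //   void f();                    // unprototyped declaration
  //   void use(void) { f(42); }    // callee is a bitcast of @f to 'void (...)*'
  //   void f(int x) {}
  // When the underlying function isn't variadic and the types line up, the
  // lambda below calls @f directly instead of going through the bitcast.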
4060 auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* { 4061 llvm::FunctionType *CalleeFT = 4062 cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType()); 4063 if (!CalleeFT->isVarArg()) 4064 return Ptr; 4065 4066 llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr); 4067 if (!CE || CE->getOpcode() != llvm::Instruction::BitCast) 4068 return Ptr; 4069 4070 llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0)); 4071 if (!OrigFn) 4072 return Ptr; 4073 4074 llvm::FunctionType *OrigFT = OrigFn->getFunctionType(); 4075 4076 // If the original type is variadic, or if any of the component types 4077 // disagree, we cannot remove the cast. 4078 if (OrigFT->isVarArg() || 4079 OrigFT->getNumParams() != CalleeFT->getNumParams() || 4080 OrigFT->getReturnType() != CalleeFT->getReturnType()) 4081 return Ptr; 4082 4083 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i) 4084 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i)) 4085 return Ptr; 4086 4087 return OrigFn; 4088 }; 4089 CalleePtr = simplifyVariadicCallee(CalleePtr); 4090 4091 // 3. Perform the actual call. 4092 4093 // Deactivate any cleanups that we're supposed to do immediately before 4094 // the call. 4095 if (!CallArgs.getCleanupsToDeactivate().empty()) 4096 deactivateArgCleanupsBeforeCall(*this, CallArgs); 4097 4098 // Assert that the arguments we computed match up. The IR verifier 4099 // will catch this, but this is a common enough source of problems 4100 // during IRGen changes that it's way better for debugging to catch 4101 // it ourselves here. 4102 #ifndef NDEBUG 4103 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg()); 4104 for (unsigned i = 0; i < IRCallArgs.size(); ++i) { 4105 // Inalloca argument can have different type. 4106 if (IRFunctionArgs.hasInallocaArg() && 4107 i == IRFunctionArgs.getInallocaArgNo()) 4108 continue; 4109 if (i < IRFuncTy->getNumParams()) 4110 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i)); 4111 } 4112 #endif 4113 4114 // Compute the calling convention and attributes. 4115 unsigned CallingConv; 4116 llvm::AttributeList Attrs; 4117 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo, 4118 Callee.getAbstractInfo(), Attrs, CallingConv, 4119 /*AttrOnCallSite=*/true); 4120 4121 // Apply some call-site-specific attributes. 4122 // TODO: work this into building the attribute set. 4123 4124 // Apply always_inline to all calls within flatten functions. 4125 // FIXME: should this really take priority over __try, below? 4126 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() && 4127 !(Callee.getAbstractInfo().getCalleeDecl() && 4128 Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) { 4129 Attrs = 4130 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex, 4131 llvm::Attribute::AlwaysInline); 4132 } 4133 4134 // Disable inlining inside SEH __try blocks. 4135 if (isSEHTryScope()) { 4136 Attrs = 4137 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex, 4138 llvm::Attribute::NoInline); 4139 } 4140 4141 // Decide whether to use a call or an invoke. 4142 bool CannotThrow; 4143 if (currentFunctionUsesSEHTry()) { 4144 // SEH cares about asynchronous exceptions, so everything can "throw." 4145 CannotThrow = false; 4146 } else if (isCleanupPadScope() && 4147 EHPersonality::get(*this).isMSVCXXPersonality()) { 4148 // The MSVC++ personality will implicitly terminate the program if an 4149 // exception is thrown during a cleanup outside of a try/catch. 
4150     // We don't need to model anything in IR to get this behavior.
4151     CannotThrow = true;
4152   } else {
4153     // Otherwise, nounwind call sites will never throw.
4154     CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4155                                      llvm::Attribute::NoUnwind);
4156   }
4157   llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4158 
4159   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4160       getBundlesForFunclet(CalleePtr);
4161 
4162   // Emit the actual call/invoke instruction.
4163   llvm::CallSite CS;
4164   if (!InvokeDest) {
4165     CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
4166   } else {
4167     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4168     CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
4169                               BundleList);
4170     EmitBlock(Cont);
4171   }
4172   llvm::Instruction *CI = CS.getInstruction();
4173   if (callOrInvoke)
4174     *callOrInvoke = CI;
4175 
4176   // Apply the attributes and calling convention.
4177   CS.setAttributes(Attrs);
4178   CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4179 
4180   // Apply various metadata.
4181 
4182   if (!CI->getType()->isVoidTy())
4183     CI->setName("call");
4184 
4185   // Insert instrumentation or attach profile metadata at indirect call sites.
4186   // For more details, see the comment before the definition of
4187   // IPVK_IndirectCallTarget in InstrProfData.inc.
4188   if (!CS.getCalledFunction())
4189     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4190                      CI, CalleePtr);
4191 
4192   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4193   // optimizer it can aggressively ignore unwind edges.
4194   if (CGM.getLangOpts().ObjCAutoRefCount)
4195     AddObjCARCExceptionMetadata(CI);
4196 
4197   // Suppress tail calls if requested.
4198   if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4199     const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4200     if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4201       Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4202   }
4203 
4204   // 4. Finish the call.
4205 
4206   // If the call doesn't return, finish the basic block and clear the
4207   // insertion point; this allows the rest of IRGen to discard
4208   // unreachable code.
4209   if (CS.doesNotReturn()) {
4210     if (UnusedReturnSize)
4211       EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
4212                       SRetPtr.getPointer());
4213 
4214     // Strip away the noreturn attribute to better diagnose unreachable UB.
4215     if (SanOpts.has(SanitizerKind::Unreachable)) {
4216       if (auto *F = CS.getCalledFunction())
4217         F->removeFnAttr(llvm::Attribute::NoReturn);
4218       CS.removeAttribute(llvm::AttributeList::FunctionIndex,
4219                          llvm::Attribute::NoReturn);
4220     }
4221 
4222     EmitUnreachable(Loc);
4223     Builder.ClearInsertionPoint();
4224 
4225     // FIXME: For now, emit a dummy basic block because expr emitters in
4226     // general are not ready to handle emitting expressions at unreachable
4227     // points.
4228     EnsureInsertPoint();
4229 
4230     // Return a reasonable RValue.
4231     return GetUndefRValue(RetTy);
4232   }
4233 
4234   // Perform the swifterror writeback.
4235   if (swiftErrorTemp.isValid()) {
4236     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4237     Builder.CreateStore(errorResult, swiftErrorArg);
4238   }
4239 
4240   // Emit any call-associated writebacks immediately. Arguably this
4241   // should happen after any return-value munging.
4242 if (CallArgs.hasWritebacks()) 4243 emitWritebacks(*this, CallArgs); 4244 4245 // The stack cleanup for inalloca arguments has to run out of the normal 4246 // lexical order, so deactivate it and run it manually here. 4247 CallArgs.freeArgumentMemory(*this); 4248 4249 // Extract the return value. 4250 RValue Ret = [&] { 4251 switch (RetAI.getKind()) { 4252 case ABIArgInfo::CoerceAndExpand: { 4253 auto coercionType = RetAI.getCoerceAndExpandType(); 4254 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 4255 4256 Address addr = SRetPtr; 4257 addr = Builder.CreateElementBitCast(addr, coercionType); 4258 4259 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType()); 4260 bool requiresExtract = isa<llvm::StructType>(CI->getType()); 4261 4262 unsigned unpaddedIndex = 0; 4263 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 4264 llvm::Type *eltType = coercionType->getElementType(i); 4265 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; 4266 Address eltAddr = Builder.CreateStructGEP(addr, i, layout); 4267 llvm::Value *elt = CI; 4268 if (requiresExtract) 4269 elt = Builder.CreateExtractValue(elt, unpaddedIndex++); 4270 else 4271 assert(unpaddedIndex == 0); 4272 Builder.CreateStore(elt, eltAddr); 4273 } 4274 // FALLTHROUGH 4275 LLVM_FALLTHROUGH; 4276 } 4277 4278 case ABIArgInfo::InAlloca: 4279 case ABIArgInfo::Indirect: { 4280 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation()); 4281 if (UnusedReturnSize) 4282 EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize), 4283 SRetPtr.getPointer()); 4284 return ret; 4285 } 4286 4287 case ABIArgInfo::Ignore: 4288 // If we are ignoring an argument that had a result, make sure to 4289 // construct the appropriate return value for our caller. 4290 return GetUndefRValue(RetTy); 4291 4292 case ABIArgInfo::Extend: 4293 case ABIArgInfo::Direct: { 4294 llvm::Type *RetIRTy = ConvertType(RetTy); 4295 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) { 4296 switch (getEvaluationKind(RetTy)) { 4297 case TEK_Complex: { 4298 llvm::Value *Real = Builder.CreateExtractValue(CI, 0); 4299 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1); 4300 return RValue::getComplex(std::make_pair(Real, Imag)); 4301 } 4302 case TEK_Aggregate: { 4303 Address DestPtr = ReturnValue.getValue(); 4304 bool DestIsVolatile = ReturnValue.isVolatile(); 4305 4306 if (!DestPtr.isValid()) { 4307 DestPtr = CreateMemTemp(RetTy, "agg.tmp"); 4308 DestIsVolatile = false; 4309 } 4310 BuildAggStore(*this, CI, DestPtr, DestIsVolatile); 4311 return RValue::getAggregate(DestPtr); 4312 } 4313 case TEK_Scalar: { 4314 // If the argument doesn't match, perform a bitcast to coerce it. This 4315 // can happen due to trivial type mismatches. 4316 llvm::Value *V = CI; 4317 if (V->getType() != RetIRTy) 4318 V = Builder.CreateBitCast(V, RetIRTy); 4319 return RValue::get(V); 4320 } 4321 } 4322 llvm_unreachable("bad evaluation kind"); 4323 } 4324 4325 Address DestPtr = ReturnValue.getValue(); 4326 bool DestIsVolatile = ReturnValue.isVolatile(); 4327 4328 if (!DestPtr.isValid()) { 4329 DestPtr = CreateMemTemp(RetTy, "coerce"); 4330 DestIsVolatile = false; 4331 } 4332 4333 // If the value is offset in memory, apply the offset now. 
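    // Sketch: a coerced direct return (e.g. a small struct handed back in an
    // i64) is stored raw into DestPtr, possibly at an offset, and then
    // re-loaded below as an RValue of the original source type.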
4334 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI); 4335 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this); 4336 4337 return convertTempToRValue(DestPtr, RetTy, SourceLocation()); 4338 } 4339 4340 case ABIArgInfo::Expand: 4341 llvm_unreachable("Invalid ABI kind for return argument"); 4342 } 4343 4344 llvm_unreachable("Unhandled ABIArgInfo::Kind"); 4345 } (); 4346 4347 // Emit the assume_aligned check on the return value. 4348 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl(); 4349 if (Ret.isScalar() && TargetDecl) { 4350 if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) { 4351 llvm::Value *OffsetValue = nullptr; 4352 if (const auto *Offset = AA->getOffset()) 4353 OffsetValue = EmitScalarExpr(Offset); 4354 4355 llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment()); 4356 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment); 4357 EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(), 4358 OffsetValue); 4359 } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) { 4360 llvm::Value *ParamVal = 4361 CallArgs[AA->getParamIndex() - 1].RV.getScalarVal(); 4362 EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal); 4363 } 4364 } 4365 4366 return Ret; 4367 } 4368 4369 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const { 4370 if (isVirtual()) { 4371 const CallExpr *CE = getVirtualCallExpr(); 4372 return CGF.CGM.getCXXABI().getVirtualFunctionPointer( 4373 CGF, getVirtualMethodDecl(), getThisAddress(), 4374 getFunctionType(), CE ? CE->getLocStart() : SourceLocation()); 4375 } 4376 4377 return *this; 4378 } 4379 4380 /* VarArg handling */ 4381 4382 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) { 4383 VAListAddr = VE->isMicrosoftABI() 4384 ? EmitMSVAListRef(VE->getSubExpr()) 4385 : EmitVAListRef(VE->getSubExpr()); 4386 QualType Ty = VE->getType(); 4387 if (VE->isMicrosoftABI()) 4388 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty); 4389 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty); 4390 } 4391