//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT,
                                 const FunctionDecl *FD) {
  // Fill out paramInfos.
  if (FPT->hasExtParameterInfos() || !paramInfos.empty()) {
    assert(paramInfos.size() <= prefix.size());
    auto protoParamInfos = FPT->getExtParameterInfos();
    paramInfos.reserve(prefix.size() + protoParamInfos.size());
    paramInfos.resize(prefix.size());
    paramInfos.append(protoParamInfos.begin(), protoParamInfos.end());
  }

  // Fast path: unknown target.
  if (FD == nullptr) {
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  assert(FD->getNumParams() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (FD->getParamDecl(I)->hasAttr<PassObjectSizeAttr>())
      prefix.push_back(CGT.getContext().getSizeType());
  }
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP, FD);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  bool PassParams = true;

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));

    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP, MD);

  TheCXXABI.buildStructorSignature(MD, Type, argTypes);

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static void addExtParameterInfosForCall(
        llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                        const FunctionProtoType *proto,
                                        unsigned prefixArgs,
                                        unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  auto protoInfos = proto->getExtParameterInfos();
  paramInfos.append(protoInfos.begin(), protoInfos.end());

  // Add default infos for the variadic arguments.
  paramInfos.resize(totalArgs);
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ constructor, passing the given arguments.
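/// ExtraArgs is the number of implicit prefix arguments (beyond 'this') that
/// appear in 'args' before the constructor's formal parameters; see the uses
/// of 1 + ExtraArgs below.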
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs, D);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  auto ParamInfos = getExtParameterInfosForCall(FPT.getTypePtr(), 1 + ExtraArgs,
                                                ArgTypes.size());
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, {}, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
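/// (For example, a block call has one extra required argument for the block
/// pointer, and a chain call has one for the chain pointer; see the callers
/// below.)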
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()),
      /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
      proto->getExtInfo(), paramInfos,
      RequiredArgs::forPrototypePlus(proto, 1, nullptr));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/ {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required) {
  unsigned numRequiredArgs =
      (proto->isVariadic() ? required.getNumRequiredArgs() : args.size());
  unsigned numPrefixArgs = numRequiredArgs - proto->getNumParams();
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
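/// Results are memoized: the computed CGFunctionInfo is cached in the
/// FunctionInfos folding set, keyed on the full signature, so repeated
/// arrangements of the same signature return the same object.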
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (info.getCC() != CC_Swift) {
    getABIInfo().computeInfo(*FI);
  } else {
    swiftcall::computeABIInfo(CGM, *FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

  void *buffer =
    operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
                                  argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if the record is a union,
    // only the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
}  // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
                              [&](Address EltAddr) {
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = RV.getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
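      // For example, narrowing an i64 to an i32 here keeps the 32 most
      // significant bits (shift right, then truncate); the little-endian
      // path below keeps the least significant bits instead, matching a
      // store-and-reload through memory.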
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}



/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
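// For example, a value of type { i32, i32 } is stored as two scalar stores at
// the corresponding struct-layout offsets rather than one aggregate store.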
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
        CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
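    // Spill the whole source value into a temporary of its own type, then copy
    // only the destination's DstSize bytes out of it.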
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                             CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns the index of the first IR argument corresponding to ArgNo, and
  /// the number of IR arguments it expands to.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect:
  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::CoerceAndExpand:
    resultType = retAI.getUnpaddedCoerceAndExpandType();
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo();
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
1557       llvm::Type *argType = ArgInfo.getCoerceToType();
1558       llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1559       if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1560         assert(NumIRArgs == st->getNumElements());
1561         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1562           ArgTypes[FirstIRArg + i] = st->getElementType(i);
1563       } else {
1564         assert(NumIRArgs == 1);
1565         ArgTypes[FirstIRArg] = argType;
1566       }
1567       break;
1568     }
1569
1570     case ABIArgInfo::CoerceAndExpand: {
1571       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1572       for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1573         *ArgTypesIter++ = EltTy;
1574       }
1575       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1576       break;
1577     }
1578
1579     case ABIArgInfo::Expand:
1580       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1581       getExpandedTypes(it->type, ArgTypesIter);
1582       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1583       break;
1584     }
1585   }
1586
1587   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1588   assert(Erased && "Not in set?");
1589
1590   return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1591 }
1592
1593 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1594   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1595   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1596
1597   if (!isFuncTypeConvertible(FPT))
1598     return llvm::StructType::get(getLLVMContext());
1599
1600   const CGFunctionInfo *Info;
1601   if (isa<CXXDestructorDecl>(MD))
1602     Info =
1603         &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
1604   else
1605     Info = &arrangeCXXMethodDeclaration(MD);
1606   return GetFunctionType(*Info);
1607 }
1608
1609 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1610                                                llvm::AttrBuilder &FuncAttrs,
1611                                                const FunctionProtoType *FPT) {
1612   if (!FPT)
1613     return;
1614
1615   if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1616       FPT->isNothrow(Ctx))
1617     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1618 }
1619
1620 void CodeGenModule::ConstructAttributeList(
1621     StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1622     AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
1623   llvm::AttrBuilder FuncAttrs;
1624   llvm::AttrBuilder RetAttrs;
1625   bool HasOptnone = false;
1626
1627   CallingConv = FI.getEffectiveCallingConvention();
1628
1629   if (FI.isNoReturn())
1630     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1631
1632   // If we have information about the function prototype, we can learn
1633   // attributes from there.
1634   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1635                                      CalleeInfo.getCalleeFunctionProtoType());
1636
1637   const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1638
1639   bool HasAnyX86InterruptAttr = false;
1640   // FIXME: handle sseregparm someday...
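  // For example (illustrative only), callees declared as
  //   void fatal(const char *msg) __attribute__((noreturn));
  //   void quiet() noexcept;
  // pick up the LLVM 'noreturn' attribute from the decl handling below and
  // the 'nounwind' attribute from the non-throwing prototype handled above.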
1641 if (TargetDecl) { 1642 if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) 1643 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice); 1644 if (TargetDecl->hasAttr<NoThrowAttr>()) 1645 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1646 if (TargetDecl->hasAttr<NoReturnAttr>()) 1647 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1648 if (TargetDecl->hasAttr<NoDuplicateAttr>()) 1649 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate); 1650 1651 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { 1652 AddAttributesFromFunctionProtoType( 1653 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>()); 1654 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function. 1655 // These attributes are not inherited by overloads. 1656 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn); 1657 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual())) 1658 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1659 } 1660 1661 // 'const', 'pure' and 'noalias' attributed functions are also nounwind. 1662 if (TargetDecl->hasAttr<ConstAttr>()) { 1663 FuncAttrs.addAttribute(llvm::Attribute::ReadNone); 1664 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1665 } else if (TargetDecl->hasAttr<PureAttr>()) { 1666 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly); 1667 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1668 } else if (TargetDecl->hasAttr<NoAliasAttr>()) { 1669 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly); 1670 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1671 } 1672 if (TargetDecl->hasAttr<RestrictAttr>()) 1673 RetAttrs.addAttribute(llvm::Attribute::NoAlias); 1674 if (TargetDecl->hasAttr<ReturnsNonNullAttr>()) 1675 RetAttrs.addAttribute(llvm::Attribute::NonNull); 1676 1677 HasAnyX86InterruptAttr = TargetDecl->hasAttr<AnyX86InterruptAttr>(); 1678 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>(); 1679 } 1680 1681 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. 1682 if (!HasOptnone) { 1683 if (CodeGenOpts.OptimizeSize) 1684 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); 1685 if (CodeGenOpts.OptimizeSize == 2) 1686 FuncAttrs.addAttribute(llvm::Attribute::MinSize); 1687 } 1688 1689 if (CodeGenOpts.DisableRedZone) 1690 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); 1691 if (CodeGenOpts.NoImplicitFloat) 1692 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); 1693 if (CodeGenOpts.EnableSegmentedStacks && 1694 !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>())) 1695 FuncAttrs.addAttribute("split-stack"); 1696 1697 if (AttrOnCallSite) { 1698 // Attributes that should go on the call site only. 1699 if (!CodeGenOpts.SimplifyLibCalls || 1700 CodeGenOpts.isNoBuiltinFunc(Name.data())) 1701 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); 1702 if (!CodeGenOpts.TrapFuncName.empty()) 1703 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName); 1704 } else { 1705 // Attributes that should go on the function, but not the call site. 
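    // As a sketch of the outcome (the exact values depend on the
    // CodeGenOptions in effect, so these are assumptions, not fixed output):
    // an unoptimized definition might end up carrying string attributes like
    //   "no-frame-pointer-elim"="true" "disable-tail-calls"="false"
    //   "stack-protector-buffer-size"="8" "use-soft-float"="false"
    // alongside the enum attributes collected above.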
1706 if (!CodeGenOpts.DisableFPElim) { 1707 FuncAttrs.addAttribute("no-frame-pointer-elim", "false"); 1708 } else if (CodeGenOpts.OmitLeafFramePointer) { 1709 FuncAttrs.addAttribute("no-frame-pointer-elim", "false"); 1710 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf"); 1711 } else { 1712 FuncAttrs.addAttribute("no-frame-pointer-elim", "true"); 1713 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf"); 1714 } 1715 1716 bool DisableTailCalls = 1717 CodeGenOpts.DisableTailCalls || HasAnyX86InterruptAttr || 1718 (TargetDecl && TargetDecl->hasAttr<DisableTailCallsAttr>()); 1719 FuncAttrs.addAttribute( 1720 "disable-tail-calls", 1721 llvm::toStringRef(DisableTailCalls)); 1722 1723 FuncAttrs.addAttribute("less-precise-fpmad", 1724 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD)); 1725 FuncAttrs.addAttribute("no-infs-fp-math", 1726 llvm::toStringRef(CodeGenOpts.NoInfsFPMath)); 1727 FuncAttrs.addAttribute("no-nans-fp-math", 1728 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath)); 1729 FuncAttrs.addAttribute("unsafe-fp-math", 1730 llvm::toStringRef(CodeGenOpts.UnsafeFPMath)); 1731 FuncAttrs.addAttribute("use-soft-float", 1732 llvm::toStringRef(CodeGenOpts.SoftFloat)); 1733 FuncAttrs.addAttribute("stack-protector-buffer-size", 1734 llvm::utostr(CodeGenOpts.SSPBufferSize)); 1735 FuncAttrs.addAttribute("no-signed-zeros-fp-math", 1736 llvm::toStringRef(CodeGenOpts.NoSignedZeros)); 1737 FuncAttrs.addAttribute( 1738 "correctly-rounded-divide-sqrt-fp-math", 1739 llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt)); 1740 1741 if (CodeGenOpts.StackRealignment) 1742 FuncAttrs.addAttribute("stackrealign"); 1743 if (CodeGenOpts.Backchain) 1744 FuncAttrs.addAttribute("backchain"); 1745 1746 // Add target-cpu and target-features attributes to functions. If 1747 // we have a decl for the function and it has a target attribute then 1748 // parse that and add it to the feature set. 1749 StringRef TargetCPU = getTarget().getTargetOpts().CPU; 1750 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl); 1751 if (FD && FD->hasAttr<TargetAttr>()) { 1752 llvm::StringMap<bool> FeatureMap; 1753 getFunctionFeatureMap(FeatureMap, FD); 1754 1755 // Produce the canonical string for this set of features. 1756 std::vector<std::string> Features; 1757 for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(), 1758 ie = FeatureMap.end(); 1759 it != ie; ++it) 1760 Features.push_back((it->second ? "+" : "-") + it->first().str()); 1761 1762 // Now add the target-cpu and target-features to the function. 1763 // While we populated the feature map above, we still need to 1764 // get and parse the target attribute so we can get the cpu for 1765 // the function. 1766 const auto *TD = FD->getAttr<TargetAttr>(); 1767 TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse(); 1768 if (ParsedAttr.second != "") 1769 TargetCPU = ParsedAttr.second; 1770 if (TargetCPU != "") 1771 FuncAttrs.addAttribute("target-cpu", TargetCPU); 1772 if (!Features.empty()) { 1773 std::sort(Features.begin(), Features.end()); 1774 FuncAttrs.addAttribute( 1775 "target-features", 1776 llvm::join(Features.begin(), Features.end(), ",")); 1777 } 1778 } else { 1779 // Otherwise just add the existing target cpu and target features to the 1780 // function. 
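      // Either way the result is a pair of string attributes; e.g. a function
      // built for an x86-64 CPU with AVX2 available (whether from the command
      // line or from __attribute__((target("avx2")))) might carry something
      // like
      //   "target-cpu"="x86-64" "target-features"="+avx2,+sse4.2"
      // where the feature list shown is illustrative, not the real expansion.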
1781 std::vector<std::string> &Features = getTarget().getTargetOpts().Features; 1782 if (TargetCPU != "") 1783 FuncAttrs.addAttribute("target-cpu", TargetCPU); 1784 if (!Features.empty()) { 1785 std::sort(Features.begin(), Features.end()); 1786 FuncAttrs.addAttribute( 1787 "target-features", 1788 llvm::join(Features.begin(), Features.end(), ",")); 1789 } 1790 } 1791 } 1792 1793 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { 1794 // Conservatively, mark all functions and calls in CUDA as convergent 1795 // (meaning, they may call an intrinsically convergent op, such as 1796 // __syncthreads(), and so can't have certain optimizations applied around 1797 // them). LLVM will remove this attribute where it safely can. 1798 FuncAttrs.addAttribute(llvm::Attribute::Convergent); 1799 1800 // Respect -fcuda-flush-denormals-to-zero. 1801 if (getLangOpts().CUDADeviceFlushDenormalsToZero) 1802 FuncAttrs.addAttribute("nvptx-f32ftz", "true"); 1803 } 1804 1805 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); 1806 1807 QualType RetTy = FI.getReturnType(); 1808 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1809 switch (RetAI.getKind()) { 1810 case ABIArgInfo::Extend: 1811 if (RetTy->hasSignedIntegerRepresentation()) 1812 RetAttrs.addAttribute(llvm::Attribute::SExt); 1813 else if (RetTy->hasUnsignedIntegerRepresentation()) 1814 RetAttrs.addAttribute(llvm::Attribute::ZExt); 1815 // FALL THROUGH 1816 case ABIArgInfo::Direct: 1817 if (RetAI.getInReg()) 1818 RetAttrs.addAttribute(llvm::Attribute::InReg); 1819 break; 1820 case ABIArgInfo::Ignore: 1821 break; 1822 1823 case ABIArgInfo::InAlloca: 1824 case ABIArgInfo::Indirect: { 1825 // inalloca and sret disable readnone and readonly 1826 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1827 .removeAttribute(llvm::Attribute::ReadNone); 1828 break; 1829 } 1830 1831 case ABIArgInfo::CoerceAndExpand: 1832 break; 1833 1834 case ABIArgInfo::Expand: 1835 llvm_unreachable("Invalid ABI kind for return argument"); 1836 } 1837 1838 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { 1839 QualType PTy = RefTy->getPointeeType(); 1840 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 1841 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 1842 .getQuantity()); 1843 else if (getContext().getTargetAddressSpace(PTy) == 0) 1844 RetAttrs.addAttribute(llvm::Attribute::NonNull); 1845 } 1846 1847 // Attach return attributes. 1848 if (RetAttrs.hasAttributes()) { 1849 PAL.push_back(llvm::AttributeSet::get( 1850 getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs)); 1851 } 1852 1853 bool hasUsedSRet = false; 1854 1855 // Attach attributes to sret. 1856 if (IRFunctionArgs.hasSRetArg()) { 1857 llvm::AttrBuilder SRETAttrs; 1858 SRETAttrs.addAttribute(llvm::Attribute::StructRet); 1859 hasUsedSRet = true; 1860 if (RetAI.getInReg()) 1861 SRETAttrs.addAttribute(llvm::Attribute::InReg); 1862 PAL.push_back(llvm::AttributeSet::get( 1863 getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs)); 1864 } 1865 1866 // Attach attributes to inalloca argument. 
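  // (Reminder of the scheme rather than something decided here: inalloca is
  // used on targets like i686-windows-msvc, where arguments that must be
  // constructed in place, e.g. objects with a non-trivial copy constructor,
  // live in one packed argument memory block; the callee then receives a
  // single trailing pointer parameter marked 'inalloca', roughly
  //   define void @f(<{ %class.Obj }>* inalloca)
  // for a function taking such an object by value. Details are illustrative.)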
1867 if (IRFunctionArgs.hasInallocaArg()) { 1868 llvm::AttrBuilder Attrs; 1869 Attrs.addAttribute(llvm::Attribute::InAlloca); 1870 PAL.push_back(llvm::AttributeSet::get( 1871 getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs)); 1872 } 1873 1874 unsigned ArgNo = 0; 1875 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 1876 E = FI.arg_end(); 1877 I != E; ++I, ++ArgNo) { 1878 QualType ParamType = I->type; 1879 const ABIArgInfo &AI = I->info; 1880 llvm::AttrBuilder Attrs; 1881 1882 // Add attribute for padding argument, if necessary. 1883 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 1884 if (AI.getPaddingInReg()) 1885 PAL.push_back(llvm::AttributeSet::get( 1886 getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1, 1887 llvm::Attribute::InReg)); 1888 } 1889 1890 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 1891 // have the corresponding parameter variable. It doesn't make 1892 // sense to do it here because parameters are so messed up. 1893 switch (AI.getKind()) { 1894 case ABIArgInfo::Extend: 1895 if (ParamType->isSignedIntegerOrEnumerationType()) 1896 Attrs.addAttribute(llvm::Attribute::SExt); 1897 else if (ParamType->isUnsignedIntegerOrEnumerationType()) { 1898 if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType)) 1899 Attrs.addAttribute(llvm::Attribute::SExt); 1900 else 1901 Attrs.addAttribute(llvm::Attribute::ZExt); 1902 } 1903 // FALL THROUGH 1904 case ABIArgInfo::Direct: 1905 if (ArgNo == 0 && FI.isChainCall()) 1906 Attrs.addAttribute(llvm::Attribute::Nest); 1907 else if (AI.getInReg()) 1908 Attrs.addAttribute(llvm::Attribute::InReg); 1909 break; 1910 1911 case ABIArgInfo::Indirect: { 1912 if (AI.getInReg()) 1913 Attrs.addAttribute(llvm::Attribute::InReg); 1914 1915 if (AI.getIndirectByVal()) 1916 Attrs.addAttribute(llvm::Attribute::ByVal); 1917 1918 CharUnits Align = AI.getIndirectAlign(); 1919 1920 // In a byval argument, it is important that the required 1921 // alignment of the type is honored, as LLVM might be creating a 1922 // *new* stack object, and needs to know what alignment to give 1923 // it. (Sometimes it can deduce a sensible alignment on its own, 1924 // but not if clang decides it must emit a packed struct, or the 1925 // user specifies increased alignment requirements.) 1926 // 1927 // This is different from indirect *not* byval, where the object 1928 // exists already, and the align attribute is purely 1929 // informative. 1930 assert(!Align.isZero()); 1931 1932 // For now, only add this when we have a byval argument. 1933 // TODO: be less lazy about updating test cases. 1934 if (AI.getIndirectByVal()) 1935 Attrs.addAlignmentAttr(Align.getQuantity()); 1936 1937 // byval disables readnone and readonly. 1938 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1939 .removeAttribute(llvm::Attribute::ReadNone); 1940 break; 1941 } 1942 case ABIArgInfo::Ignore: 1943 case ABIArgInfo::Expand: 1944 case ABIArgInfo::CoerceAndExpand: 1945 break; 1946 1947 case ABIArgInfo::InAlloca: 1948 // inalloca disables readnone and readonly. 
1949 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1950 .removeAttribute(llvm::Attribute::ReadNone); 1951 continue; 1952 } 1953 1954 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { 1955 QualType PTy = RefTy->getPointeeType(); 1956 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 1957 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 1958 .getQuantity()); 1959 else if (getContext().getTargetAddressSpace(PTy) == 0) 1960 Attrs.addAttribute(llvm::Attribute::NonNull); 1961 } 1962 1963 switch (FI.getExtParameterInfo(ArgNo).getABI()) { 1964 case ParameterABI::Ordinary: 1965 break; 1966 1967 case ParameterABI::SwiftIndirectResult: { 1968 // Add 'sret' if we haven't already used it for something, but 1969 // only if the result is void. 1970 if (!hasUsedSRet && RetTy->isVoidType()) { 1971 Attrs.addAttribute(llvm::Attribute::StructRet); 1972 hasUsedSRet = true; 1973 } 1974 1975 // Add 'noalias' in either case. 1976 Attrs.addAttribute(llvm::Attribute::NoAlias); 1977 1978 // Add 'dereferenceable' and 'alignment'. 1979 auto PTy = ParamType->getPointeeType(); 1980 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { 1981 auto info = getContext().getTypeInfoInChars(PTy); 1982 Attrs.addDereferenceableAttr(info.first.getQuantity()); 1983 Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(), 1984 info.second.getQuantity())); 1985 } 1986 break; 1987 } 1988 1989 case ParameterABI::SwiftErrorResult: 1990 Attrs.addAttribute(llvm::Attribute::SwiftError); 1991 break; 1992 1993 case ParameterABI::SwiftContext: 1994 Attrs.addAttribute(llvm::Attribute::SwiftSelf); 1995 break; 1996 } 1997 1998 if (Attrs.hasAttributes()) { 1999 unsigned FirstIRArg, NumIRArgs; 2000 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2001 for (unsigned i = 0; i < NumIRArgs; i++) 2002 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), 2003 FirstIRArg + i + 1, Attrs)); 2004 } 2005 } 2006 assert(ArgNo == FI.arg_size()); 2007 2008 if (FuncAttrs.hasAttributes()) 2009 PAL.push_back(llvm:: 2010 AttributeSet::get(getLLVMContext(), 2011 llvm::AttributeSet::FunctionIndex, 2012 FuncAttrs)); 2013 } 2014 2015 /// An argument came in as a promoted argument; demote it back to its 2016 /// declared type. 2017 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 2018 const VarDecl *var, 2019 llvm::Value *value) { 2020 llvm::Type *varType = CGF.ConvertType(var->getType()); 2021 2022 // This can happen with promotions that actually don't change the 2023 // underlying type, like the enum promotions. 2024 if (value->getType() == varType) return value; 2025 2026 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 2027 && "unexpected promotion type"); 2028 2029 if (isa<llvm::IntegerType>(varType)) 2030 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 2031 2032 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 2033 } 2034 2035 /// Returns the attribute (either parameter attribute, or function 2036 /// attribute), which declares argument ArgNo to be non-null. 2037 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, 2038 QualType ArgType, unsigned ArgNo) { 2039 // FIXME: __attribute__((nonnull)) can also be applied to: 2040 // - references to pointers, where the pointee is known to be 2041 // nonnull (apparently a Clang extension) 2042 // - transparent unions containing pointers 2043 // In the former case, LLVM IR cannot represent the constraint. 
In 2044 // the latter case, we have no guarantee that the transparent union 2045 // is in fact passed as a pointer. 2046 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) 2047 return nullptr; 2048 // First, check attribute on parameter itself. 2049 if (PVD) { 2050 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) 2051 return ParmNNAttr; 2052 } 2053 // Check function attributes. 2054 if (!FD) 2055 return nullptr; 2056 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) { 2057 if (NNAttr->isNonNull(ArgNo)) 2058 return NNAttr; 2059 } 2060 return nullptr; 2061 } 2062 2063 namespace { 2064 struct CopyBackSwiftError final : EHScopeStack::Cleanup { 2065 Address Temp; 2066 Address Arg; 2067 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {} 2068 void Emit(CodeGenFunction &CGF, Flags flags) override { 2069 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp); 2070 CGF.Builder.CreateStore(errorValue, Arg); 2071 } 2072 }; 2073 } 2074 2075 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, 2076 llvm::Function *Fn, 2077 const FunctionArgList &Args) { 2078 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) 2079 // Naked functions don't have prologues. 2080 return; 2081 2082 // If this is an implicit-return-zero function, go ahead and 2083 // initialize the return value. TODO: it might be nice to have 2084 // a more general mechanism for this that didn't require synthesized 2085 // return statements. 2086 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) { 2087 if (FD->hasImplicitReturnZero()) { 2088 QualType RetTy = FD->getReturnType().getUnqualifiedType(); 2089 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy); 2090 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy); 2091 Builder.CreateStore(Zero, ReturnValue); 2092 } 2093 } 2094 2095 // FIXME: We no longer need the types from FunctionArgList; lift up and 2096 // simplify. 2097 2098 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI); 2099 // Flattened function arguments. 2100 SmallVector<llvm::Value *, 16> FnArgs; 2101 FnArgs.reserve(IRFunctionArgs.totalIRArgs()); 2102 for (auto &Arg : Fn->args()) { 2103 FnArgs.push_back(&Arg); 2104 } 2105 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs()); 2106 2107 // If we're using inalloca, all the memory arguments are GEPs off of the last 2108 // parameter, which is a pointer to the complete memory area. 2109 Address ArgStruct = Address::invalid(); 2110 const llvm::StructLayout *ArgStructLayout = nullptr; 2111 if (IRFunctionArgs.hasInallocaArg()) { 2112 ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct()); 2113 ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()], 2114 FI.getArgStructAlignment()); 2115 2116 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo()); 2117 } 2118 2119 // Name the struct return parameter. 2120 if (IRFunctionArgs.hasSRetArg()) { 2121 auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]); 2122 AI->setName("agg.result"); 2123 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1, 2124 llvm::Attribute::NoAlias)); 2125 } 2126 2127 // Track if we received the parameter as a pointer (indirect, byval, or 2128 // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it 2129 // into a local alloca for us. 2130 SmallVector<ParamValue, 16> ArgVals; 2131 ArgVals.reserve(Args.size()); 2132 2133 // Create a pointer value for every parameter declaration. 
This usually 2134 // entails copying one or more LLVM IR arguments into an alloca. Don't push 2135 // any cleanups or do anything that might unwind. We do that separately, so 2136 // we can push the cleanups in the correct order for the ABI. 2137 assert(FI.arg_size() == Args.size() && 2138 "Mismatch between function signature & arguments."); 2139 unsigned ArgNo = 0; 2140 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); 2141 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 2142 i != e; ++i, ++info_it, ++ArgNo) { 2143 const VarDecl *Arg = *i; 2144 QualType Ty = info_it->type; 2145 const ABIArgInfo &ArgI = info_it->info; 2146 2147 bool isPromoted = 2148 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted(); 2149 2150 unsigned FirstIRArg, NumIRArgs; 2151 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2152 2153 switch (ArgI.getKind()) { 2154 case ABIArgInfo::InAlloca: { 2155 assert(NumIRArgs == 0); 2156 auto FieldIndex = ArgI.getInAllocaFieldIndex(); 2157 CharUnits FieldOffset = 2158 CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex)); 2159 Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset, 2160 Arg->getName()); 2161 ArgVals.push_back(ParamValue::forIndirect(V)); 2162 break; 2163 } 2164 2165 case ABIArgInfo::Indirect: { 2166 assert(NumIRArgs == 1); 2167 Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign()); 2168 2169 if (!hasScalarEvaluationKind(Ty)) { 2170 // Aggregates and complex variables are accessed by reference. All we 2171 // need to do is realign the value, if requested. 2172 Address V = ParamAddr; 2173 if (ArgI.getIndirectRealign()) { 2174 Address AlignedTemp = CreateMemTemp(Ty, "coerce"); 2175 2176 // Copy from the incoming argument pointer to the temporary with the 2177 // appropriate alignment. 2178 // 2179 // FIXME: We should have a common utility for generating an aggregate 2180 // copy. 2181 CharUnits Size = getContext().getTypeSizeInChars(Ty); 2182 auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()); 2183 Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy); 2184 Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy); 2185 Builder.CreateMemCpy(Dst, Src, SizeVal, false); 2186 V = AlignedTemp; 2187 } 2188 ArgVals.push_back(ParamValue::forIndirect(V)); 2189 } else { 2190 // Load scalar value from indirect argument. 2191 llvm::Value *V = 2192 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart()); 2193 2194 if (isPromoted) 2195 V = emitArgumentDemotion(*this, Arg, V); 2196 ArgVals.push_back(ParamValue::forDirect(V)); 2197 } 2198 break; 2199 } 2200 2201 case ABIArgInfo::Extend: 2202 case ABIArgInfo::Direct: { 2203 2204 // If we have the trivial case, handle it with no muss and fuss. 
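      // In this simple case the IR argument is used essentially as-is; e.g.
      // an 'int' parameter whose coercion type is already i32 needs no
      // reconstruction, only the extra attributes computed below. As an
      // illustration of those, a parameter declared 'int buf[static 16]'
      // would typically get 'dereferenceable(64)' (16 elements of 4 bytes,
      // assuming a 32-bit int), per the constant-array handling below.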
2205 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 2206 ArgI.getCoerceToType() == ConvertType(Ty) && 2207 ArgI.getDirectOffset() == 0) { 2208 assert(NumIRArgs == 1); 2209 llvm::Value *V = FnArgs[FirstIRArg]; 2210 auto AI = cast<llvm::Argument>(V); 2211 2212 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) { 2213 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), 2214 PVD->getFunctionScopeIndex())) 2215 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2216 AI->getArgNo() + 1, 2217 llvm::Attribute::NonNull)); 2218 2219 QualType OTy = PVD->getOriginalType(); 2220 if (const auto *ArrTy = 2221 getContext().getAsConstantArrayType(OTy)) { 2222 // A C99 array parameter declaration with the static keyword also 2223 // indicates dereferenceability, and if the size is constant we can 2224 // use the dereferenceable attribute (which requires the size in 2225 // bytes). 2226 if (ArrTy->getSizeModifier() == ArrayType::Static) { 2227 QualType ETy = ArrTy->getElementType(); 2228 uint64_t ArrSize = ArrTy->getSize().getZExtValue(); 2229 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && 2230 ArrSize) { 2231 llvm::AttrBuilder Attrs; 2232 Attrs.addDereferenceableAttr( 2233 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize); 2234 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2235 AI->getArgNo() + 1, Attrs)); 2236 } else if (getContext().getTargetAddressSpace(ETy) == 0) { 2237 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2238 AI->getArgNo() + 1, 2239 llvm::Attribute::NonNull)); 2240 } 2241 } 2242 } else if (const auto *ArrTy = 2243 getContext().getAsVariableArrayType(OTy)) { 2244 // For C99 VLAs with the static keyword, we don't know the size so 2245 // we can't use the dereferenceable attribute, but in addrspace(0) 2246 // we know that it must be nonnull. 2247 if (ArrTy->getSizeModifier() == VariableArrayType::Static && 2248 !getContext().getTargetAddressSpace(ArrTy->getElementType())) 2249 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2250 AI->getArgNo() + 1, 2251 llvm::Attribute::NonNull)); 2252 } 2253 2254 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); 2255 if (!AVAttr) 2256 if (const auto *TOTy = dyn_cast<TypedefType>(OTy)) 2257 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); 2258 if (AVAttr) { 2259 llvm::Value *AlignmentValue = 2260 EmitScalarExpr(AVAttr->getAlignment()); 2261 llvm::ConstantInt *AlignmentCI = 2262 cast<llvm::ConstantInt>(AlignmentValue); 2263 unsigned Alignment = 2264 std::min((unsigned) AlignmentCI->getZExtValue(), 2265 +llvm::Value::MaximumAlignment); 2266 2267 llvm::AttrBuilder Attrs; 2268 Attrs.addAlignmentAttr(Alignment); 2269 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2270 AI->getArgNo() + 1, Attrs)); 2271 } 2272 } 2273 2274 if (Arg->getType().isRestrictQualified()) 2275 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2276 AI->getArgNo() + 1, 2277 llvm::Attribute::NoAlias)); 2278 2279 // LLVM expects swifterror parameters to be used in very restricted 2280 // ways. Copy the value into a less-restricted temporary. 
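        // Sketch of that copy, with made-up value names and assuming the
        // usual pointer-to-error-pointer parameter shape:
        //   %swifterror.temp = alloca %swift.error*
        //   %0 = load %swift.error*, %swift.error** %arg
        //   store %swift.error* %0, %swift.error** %swifterror.temp
        // The body then uses the temporary, and the cleanup pushed below
        // copies the final value back into the real argument slot on exit.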
2281 if (FI.getExtParameterInfo(ArgNo).getABI() 2282 == ParameterABI::SwiftErrorResult) { 2283 QualType pointeeTy = Ty->getPointeeType(); 2284 assert(pointeeTy->isPointerType()); 2285 Address temp = 2286 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 2287 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy)); 2288 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); 2289 Builder.CreateStore(incomingErrorValue, temp); 2290 V = temp.getPointer(); 2291 2292 // Push a cleanup to copy the value back at the end of the function. 2293 // The convention does not guarantee that the value will be written 2294 // back if the function exits with an unwind exception. 2295 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg); 2296 } 2297 2298 // Ensure the argument is the correct type. 2299 if (V->getType() != ArgI.getCoerceToType()) 2300 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 2301 2302 if (isPromoted) 2303 V = emitArgumentDemotion(*this, Arg, V); 2304 2305 if (const CXXMethodDecl *MD = 2306 dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) { 2307 if (MD->isVirtual() && Arg == CXXABIThisDecl) 2308 V = CGM.getCXXABI(). 2309 adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V); 2310 } 2311 2312 // Because of merging of function types from multiple decls it is 2313 // possible for the type of an argument to not match the corresponding 2314 // type in the function type. Since we are codegening the callee 2315 // in here, add a cast to the argument type. 2316 llvm::Type *LTy = ConvertType(Arg->getType()); 2317 if (V->getType() != LTy) 2318 V = Builder.CreateBitCast(V, LTy); 2319 2320 ArgVals.push_back(ParamValue::forDirect(V)); 2321 break; 2322 } 2323 2324 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), 2325 Arg->getName()); 2326 2327 // Pointer to store into. 2328 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI); 2329 2330 // Fast-isel and the optimizer generally like scalar values better than 2331 // FCAs, so we flatten them if this is safe to do for this argument. 2332 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 2333 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && 2334 STy->getNumElements() > 1) { 2335 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy); 2336 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 2337 llvm::Type *DstTy = Ptr.getElementType(); 2338 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 2339 2340 Address AddrToStoreInto = Address::invalid(); 2341 if (SrcSize <= DstSize) { 2342 AddrToStoreInto = 2343 Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy)); 2344 } else { 2345 AddrToStoreInto = 2346 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce"); 2347 } 2348 2349 assert(STy->getNumElements() == NumIRArgs); 2350 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2351 auto AI = FnArgs[FirstIRArg + i]; 2352 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 2353 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i)); 2354 Address EltPtr = 2355 Builder.CreateStructGEP(AddrToStoreInto, i, Offset); 2356 Builder.CreateStore(AI, EltPtr); 2357 } 2358 2359 if (SrcSize > DstSize) { 2360 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize); 2361 } 2362 2363 } else { 2364 // Simple case, just do a coerced store of the argument into the alloca. 
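        // E.g. an argument that arrives as one i64 while the Clang type
        // converts to { i32, i32 } is written into the alloca through the
        // bit-compatible store done by CreateCoercedStore (types here are
        // only an illustration; the mismatch handling lives in that helper).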
2365 assert(NumIRArgs == 1); 2366 auto AI = FnArgs[FirstIRArg]; 2367 AI->setName(Arg->getName() + ".coerce"); 2368 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this); 2369 } 2370 2371 // Match to what EmitParmDecl is expecting for this type. 2372 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 2373 llvm::Value *V = 2374 EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart()); 2375 if (isPromoted) 2376 V = emitArgumentDemotion(*this, Arg, V); 2377 ArgVals.push_back(ParamValue::forDirect(V)); 2378 } else { 2379 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2380 } 2381 break; 2382 } 2383 2384 case ABIArgInfo::CoerceAndExpand: { 2385 // Reconstruct into a temporary. 2386 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 2387 ArgVals.push_back(ParamValue::forIndirect(alloca)); 2388 2389 auto coercionType = ArgI.getCoerceAndExpandType(); 2390 alloca = Builder.CreateElementBitCast(alloca, coercionType); 2391 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 2392 2393 unsigned argIndex = FirstIRArg; 2394 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 2395 llvm::Type *eltType = coercionType->getElementType(i); 2396 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) 2397 continue; 2398 2399 auto eltAddr = Builder.CreateStructGEP(alloca, i, layout); 2400 auto elt = FnArgs[argIndex++]; 2401 Builder.CreateStore(elt, eltAddr); 2402 } 2403 assert(argIndex == FirstIRArg + NumIRArgs); 2404 break; 2405 } 2406 2407 case ABIArgInfo::Expand: { 2408 // If this structure was expanded into multiple arguments then 2409 // we need to create a temporary and reconstruct it from the 2410 // arguments. 2411 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 2412 LValue LV = MakeAddrLValue(Alloca, Ty); 2413 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2414 2415 auto FnArgIter = FnArgs.begin() + FirstIRArg; 2416 ExpandTypeFromArgs(Ty, LV, FnArgIter); 2417 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs); 2418 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { 2419 auto AI = FnArgs[FirstIRArg + i]; 2420 AI->setName(Arg->getName() + "." + Twine(i)); 2421 } 2422 break; 2423 } 2424 2425 case ABIArgInfo::Ignore: 2426 assert(NumIRArgs == 0); 2427 // Initialize the local variable appropriately. 2428 if (!hasScalarEvaluationKind(Ty)) { 2429 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty))); 2430 } else { 2431 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType())); 2432 ArgVals.push_back(ParamValue::forDirect(U)); 2433 } 2434 break; 2435 } 2436 } 2437 2438 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 2439 for (int I = Args.size() - 1; I >= 0; --I) 2440 EmitParmDecl(*Args[I], ArgVals[I], I + 1); 2441 } else { 2442 for (unsigned I = 0, E = Args.size(); I != E; ++I) 2443 EmitParmDecl(*Args[I], ArgVals[I], I + 1); 2444 } 2445 } 2446 2447 static void eraseUnusedBitCasts(llvm::Instruction *insn) { 2448 while (insn->use_empty()) { 2449 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn); 2450 if (!bitcast) return; 2451 2452 // This is "safe" because we would have used a ConstantExpr otherwise. 2453 insn = cast<llvm::Instruction>(bitcast->getOperand(0)); 2454 bitcast->eraseFromParent(); 2455 } 2456 } 2457 2458 /// Try to emit a fused autorelease of a return result. 2459 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, 2460 llvm::Value *result) { 2461 // We must be immediately followed the cast. 
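  // Informally, the rewrite attempted here is: a returned value produced by
  //   %val = call i8* @objc_retain(i8* %x)
  // is turned into a single objc_retainAutoreleaseReturnValue call instead of
  // a separate retain plus autorelease, and a value produced by
  // objc_retainAutoreleasedReturnValue can have that call (and its marker)
  // deleted outright. This is a sketch; the precise pattern matched is
  // spelled out in the code below.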
2462 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock(); 2463 if (BB->empty()) return nullptr; 2464 if (&BB->back() != result) return nullptr; 2465 2466 llvm::Type *resultType = result->getType(); 2467 2468 // result is in a BasicBlock and is therefore an Instruction. 2469 llvm::Instruction *generator = cast<llvm::Instruction>(result); 2470 2471 SmallVector<llvm::Instruction*,4> insnsToKill; 2472 2473 // Look for: 2474 // %generator = bitcast %type1* %generator2 to %type2* 2475 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) { 2476 // We would have emitted this as a constant if the operand weren't 2477 // an Instruction. 2478 generator = cast<llvm::Instruction>(bitcast->getOperand(0)); 2479 2480 // Require the generator to be immediately followed by the cast. 2481 if (generator->getNextNode() != bitcast) 2482 return nullptr; 2483 2484 insnsToKill.push_back(bitcast); 2485 } 2486 2487 // Look for: 2488 // %generator = call i8* @objc_retain(i8* %originalResult) 2489 // or 2490 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult) 2491 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator); 2492 if (!call) return nullptr; 2493 2494 bool doRetainAutorelease; 2495 2496 if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) { 2497 doRetainAutorelease = true; 2498 } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints() 2499 .objc_retainAutoreleasedReturnValue) { 2500 doRetainAutorelease = false; 2501 2502 // If we emitted an assembly marker for this call (and the 2503 // ARCEntrypoints field should have been set if so), go looking 2504 // for that call. If we can't find it, we can't do this 2505 // optimization. But it should always be the immediately previous 2506 // instruction, unless we needed bitcasts around the call. 2507 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { 2508 llvm::Instruction *prev = call->getPrevNode(); 2509 assert(prev); 2510 if (isa<llvm::BitCastInst>(prev)) { 2511 prev = prev->getPrevNode(); 2512 assert(prev); 2513 } 2514 assert(isa<llvm::CallInst>(prev)); 2515 assert(cast<llvm::CallInst>(prev)->getCalledValue() == 2516 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); 2517 insnsToKill.push_back(prev); 2518 } 2519 } else { 2520 return nullptr; 2521 } 2522 2523 result = call->getArgOperand(0); 2524 insnsToKill.push_back(call); 2525 2526 // Keep killing bitcasts, for sanity. Note that we no longer care 2527 // about precise ordering as long as there's exactly one use. 2528 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 2529 if (!bitcast->hasOneUse()) break; 2530 insnsToKill.push_back(bitcast); 2531 result = bitcast->getOperand(0); 2532 } 2533 2534 // Delete all the unnecessary instructions, from latest to earliest. 2535 for (SmallVectorImpl<llvm::Instruction*>::iterator 2536 i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i) 2537 (*i)->eraseFromParent(); 2538 2539 // Do the fused retain/autorelease if we were asked to. 2540 if (doRetainAutorelease) 2541 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 2542 2543 // Cast back to the result type. 2544 return CGF.Builder.CreateBitCast(result, resultType); 2545 } 2546 2547 /// If this is a +1 of the value of an immutable 'self', remove it. 2548 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 2549 llvm::Value *result) { 2550 // This is only applicable to a method with an immutable 'self'. 
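  // For instance, in
  //   - (id)foo { return self; }
  // ARC would otherwise retain the loaded 'self' and autorelease it on
  // return; because 'self' is immutable here, the retain of a plain load of
  // 'self' can simply be deleted, as done below.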
2551 const ObjCMethodDecl *method = 2552 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 2553 if (!method) return nullptr; 2554 const VarDecl *self = method->getSelfDecl(); 2555 if (!self->getType().isConstQualified()) return nullptr; 2556 2557 // Look for a retain call. 2558 llvm::CallInst *retainCall = 2559 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 2560 if (!retainCall || 2561 retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain) 2562 return nullptr; 2563 2564 // Look for an ordinary load of 'self'. 2565 llvm::Value *retainedValue = retainCall->getArgOperand(0); 2566 llvm::LoadInst *load = 2567 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 2568 if (!load || load->isAtomic() || load->isVolatile() || 2569 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) 2570 return nullptr; 2571 2572 // Okay! Burn it all down. This relies for correctness on the 2573 // assumption that the retain is emitted as part of the return and 2574 // that thereafter everything is used "linearly". 2575 llvm::Type *resultType = result->getType(); 2576 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 2577 assert(retainCall->use_empty()); 2578 retainCall->eraseFromParent(); 2579 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 2580 2581 return CGF.Builder.CreateBitCast(load, resultType); 2582 } 2583 2584 /// Emit an ARC autorelease of the result of a function. 2585 /// 2586 /// \return the value to actually return from the function 2587 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 2588 llvm::Value *result) { 2589 // If we're returning 'self', kill the initial retain. This is a 2590 // heuristic attempt to "encourage correctness" in the really unfortunate 2591 // case where we have a return of self during a dealloc and we desperately 2592 // need to avoid the possible autorelease. 2593 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result)) 2594 return self; 2595 2596 // At -O0, try to emit a fused retain/autorelease. 2597 if (CGF.shouldUseFusedARCCalls()) 2598 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result)) 2599 return fused; 2600 2601 return CGF.EmitARCAutoreleaseReturnValue(result); 2602 } 2603 2604 /// Heuristically search for a dominating store to the return-value slot. 2605 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { 2606 // Check if a User is a store which pointerOperand is the ReturnValue. 2607 // We are looking for stores to the ReturnValue, not for stores of the 2608 // ReturnValue to some other location. 2609 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * { 2610 auto *SI = dyn_cast<llvm::StoreInst>(U); 2611 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer()) 2612 return nullptr; 2613 // These aren't actually possible for non-coerced returns, and we 2614 // only care about non-coerced returns on this code path. 2615 assert(!SI->isAtomic() && !SI->isVolatile()); 2616 return SI; 2617 }; 2618 // If there are multiple uses of the return-value slot, just check 2619 // for something immediately preceding the IP. Sometimes this can 2620 // happen with how we generate implicit-returns; it can also happen 2621 // with noreturn cleanups. 
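  // The shape being recognized, informally (names are illustrative):
  //   store i32 %result, i32* %retval
  //   ... only lifetime markers / matching bitcasts in between ...
  //   <insertion point of the eventual ret>
  // When such a store is found, the epilogue can return %result directly and
  // drop the reload from (and often the store to) %retval.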
2622   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2623     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2624     if (IP->empty()) return nullptr;
2625     llvm::Instruction *I = &IP->back();
2626
2627     // Skip lifetime markers
2628     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2629                                             IE = IP->rend();
2630          II != IE; ++II) {
2631       if (llvm::IntrinsicInst *Intrinsic =
2632               dyn_cast<llvm::IntrinsicInst>(&*II)) {
2633         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2634           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2635           ++II;
2636           if (II == IE)
2637             break;
2638           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2639             continue;
2640         }
2641       }
2642       I = &*II;
2643       break;
2644     }
2645
2646     return GetStoreIfValid(I);
2647   }
2648
2649   llvm::StoreInst *store =
2650       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2651   if (!store) return nullptr;
2652
2653   // Now do a first-and-dirty dominance check: just walk up the
2654   // single-predecessor chain from the current insertion point.
2655   llvm::BasicBlock *StoreBB = store->getParent();
2656   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2657   while (IP != StoreBB) {
2658     if (!(IP = IP->getSinglePredecessor()))
2659       return nullptr;
2660   }
2661
2662   // Okay, the store's basic block dominates the insertion point; we
2663   // can do our thing.
2664   return store;
2665 }
2666
2667 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2668                                          bool EmitRetDbgLoc,
2669                                          SourceLocation EndLoc) {
2670   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2671     // Naked functions don't have epilogues.
2672     Builder.CreateUnreachable();
2673     return;
2674   }
2675
2676   // Functions with no result always return void.
2677   if (!ReturnValue.isValid()) {
2678     Builder.CreateRetVoid();
2679     return;
2680   }
2681
2682   llvm::DebugLoc RetDbgLoc;
2683   llvm::Value *RV = nullptr;
2684   QualType RetTy = FI.getReturnType();
2685   const ABIArgInfo &RetAI = FI.getReturnInfo();
2686
2687   switch (RetAI.getKind()) {
2688   case ABIArgInfo::InAlloca:
2689     // Aggregates get evaluated directly into the destination. Sometimes we
2690     // need to return the sret value in a register, though.
2691     assert(hasAggregateEvaluationKind(RetTy));
2692     if (RetAI.getInAllocaSRet()) {
2693       llvm::Function::arg_iterator EI = CurFn->arg_end();
2694       --EI;
2695       llvm::Value *ArgStruct = &*EI;
2696       llvm::Value *SRet = Builder.CreateStructGEP(
2697           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2698       RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2699     }
2700     break;
2701
2702   case ABIArgInfo::Indirect: {
2703     auto AI = CurFn->arg_begin();
2704     if (RetAI.isSRetAfterThis())
2705       ++AI;
2706     switch (getEvaluationKind(RetTy)) {
2707     case TEK_Complex: {
2708       ComplexPairTy RT =
2709           EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2710       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2711                          /*isInit*/ true);
2712       break;
2713     }
2714     case TEK_Aggregate:
2715       // Do nothing; aggregates get evaluated directly into the destination.
2716       break;
2717     case TEK_Scalar:
2718       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2719                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
2720                         /*isInit*/ true);
2721       break;
2722     }
2723     break;
2724   }
2725
2726   case ABIArgInfo::Extend:
2727   case ABIArgInfo::Direct:
2728     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2729         RetAI.getDirectOffset() == 0) {
2730       // The internal return value temp will always have pointer-to-return-type
2731       // type; just do a load.
2732 2733 // If there is a dominating store to ReturnValue, we can elide 2734 // the load, zap the store, and usually zap the alloca. 2735 if (llvm::StoreInst *SI = 2736 findDominatingStoreToReturnValue(*this)) { 2737 // Reuse the debug location from the store unless there is 2738 // cleanup code to be emitted between the store and return 2739 // instruction. 2740 if (EmitRetDbgLoc && !AutoreleaseResult) 2741 RetDbgLoc = SI->getDebugLoc(); 2742 // Get the stored value and nuke the now-dead store. 2743 RV = SI->getValueOperand(); 2744 SI->eraseFromParent(); 2745 2746 // If that was the only use of the return value, nuke it as well now. 2747 auto returnValueInst = ReturnValue.getPointer(); 2748 if (returnValueInst->use_empty()) { 2749 if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) { 2750 alloca->eraseFromParent(); 2751 ReturnValue = Address::invalid(); 2752 } 2753 } 2754 2755 // Otherwise, we have to do a simple load. 2756 } else { 2757 RV = Builder.CreateLoad(ReturnValue); 2758 } 2759 } else { 2760 // If the value is offset in memory, apply the offset now. 2761 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI); 2762 2763 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this); 2764 } 2765 2766 // In ARC, end functions that return a retainable type with a call 2767 // to objc_autoreleaseReturnValue. 2768 if (AutoreleaseResult) { 2769 #ifndef NDEBUG 2770 // Type::isObjCRetainabletype has to be called on a QualType that hasn't 2771 // been stripped of the typedefs, so we cannot use RetTy here. Get the 2772 // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from 2773 // CurCodeDecl or BlockInfo. 2774 QualType RT; 2775 2776 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl)) 2777 RT = FD->getReturnType(); 2778 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl)) 2779 RT = MD->getReturnType(); 2780 else if (isa<BlockDecl>(CurCodeDecl)) 2781 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType(); 2782 else 2783 llvm_unreachable("Unexpected function/method type"); 2784 2785 assert(getLangOpts().ObjCAutoRefCount && 2786 !FI.isReturnsRetained() && 2787 RT->isObjCRetainableType()); 2788 #endif 2789 RV = emitAutoreleaseOfResult(*this, RV); 2790 } 2791 2792 break; 2793 2794 case ABIArgInfo::Ignore: 2795 break; 2796 2797 case ABIArgInfo::CoerceAndExpand: { 2798 auto coercionType = RetAI.getCoerceAndExpandType(); 2799 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 2800 2801 // Load all of the coerced elements out into results. 2802 llvm::SmallVector<llvm::Value*, 4> results; 2803 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType); 2804 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 2805 auto coercedEltType = coercionType->getElementType(i); 2806 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType)) 2807 continue; 2808 2809 auto eltAddr = Builder.CreateStructGEP(addr, i, layout); 2810 auto elt = Builder.CreateLoad(eltAddr); 2811 results.push_back(elt); 2812 } 2813 2814 // If we have one result, it's the single direct result type. 2815 if (results.size() == 1) { 2816 RV = results[0]; 2817 2818 // Otherwise, we need to make a first-class aggregate. 2819 } else { 2820 // Construct a return type that lacks padding elements. 
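      // E.g. if the coercion type were { i32, [4 x i8], float } with the
      // middle element serving as padding, the unpadded type would be
      // { i32, float } and the two loaded values are inserted into it below.
      // (The layout shown is an assumption for illustration; the real types
      // come from the coerce-and-expand lowering.)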
2821 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType(); 2822 2823 RV = llvm::UndefValue::get(returnType); 2824 for (unsigned i = 0, e = results.size(); i != e; ++i) { 2825 RV = Builder.CreateInsertValue(RV, results[i], i); 2826 } 2827 } 2828 break; 2829 } 2830 2831 case ABIArgInfo::Expand: 2832 llvm_unreachable("Invalid ABI kind for return argument"); 2833 } 2834 2835 llvm::Instruction *Ret; 2836 if (RV) { 2837 if (CurCodeDecl && SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) { 2838 if (auto RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>()) { 2839 SanitizerScope SanScope(this); 2840 llvm::Value *Cond = Builder.CreateICmpNE( 2841 RV, llvm::Constant::getNullValue(RV->getType())); 2842 llvm::Constant *StaticData[] = { 2843 EmitCheckSourceLocation(EndLoc), 2844 EmitCheckSourceLocation(RetNNAttr->getLocation()), 2845 }; 2846 EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute), 2847 "nonnull_return", StaticData, None); 2848 } 2849 } 2850 Ret = Builder.CreateRet(RV); 2851 } else { 2852 Ret = Builder.CreateRetVoid(); 2853 } 2854 2855 if (RetDbgLoc) 2856 Ret->setDebugLoc(std::move(RetDbgLoc)); 2857 } 2858 2859 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 2860 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 2861 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 2862 } 2863 2864 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, 2865 QualType Ty) { 2866 // FIXME: Generate IR in one pass, rather than going back and fixing up these 2867 // placeholders. 2868 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 2869 llvm::Value *Placeholder = 2870 llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo()); 2871 Placeholder = CGF.Builder.CreateDefaultAlignedLoad(Placeholder); 2872 2873 // FIXME: When we generate this IR in one pass, we shouldn't need 2874 // this win32-specific alignment hack. 2875 CharUnits Align = CharUnits::fromQuantity(4); 2876 2877 return AggValueSlot::forAddr(Address(Placeholder, Align), 2878 Ty.getQualifiers(), 2879 AggValueSlot::IsNotDestructed, 2880 AggValueSlot::DoesNotNeedGCBarriers, 2881 AggValueSlot::IsNotAliased); 2882 } 2883 2884 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 2885 const VarDecl *param, 2886 SourceLocation loc) { 2887 // StartFunction converted the ABI-lowered parameter(s) into a 2888 // local alloca. We need to turn that into an r-value suitable 2889 // for EmitCall. 2890 Address local = GetAddrOfLocalVar(param); 2891 2892 QualType type = param->getType(); 2893 2894 assert(!isInAllocaArgument(CGM.getCXXABI(), type) && 2895 "cannot emit delegate call arguments for inalloca arguments!"); 2896 2897 // For the most part, we just need to load the alloca, except that 2898 // aggregate r-values are actually pointers to temporaries. 2899 if (type->isReferenceType()) 2900 args.add(RValue::get(Builder.CreateLoad(local)), type); 2901 else 2902 args.add(convertTempToRValue(local, type, loc), type); 2903 } 2904 2905 static bool isProvablyNull(llvm::Value *addr) { 2906 return isa<llvm::ConstantPointerNull>(addr); 2907 } 2908 2909 static bool isProvablyNonNull(llvm::Value *addr) { 2910 return isa<llvm::AllocaInst>(addr); 2911 } 2912 2913 /// Emit the actual writing-back of a writeback. 
2914 static void emitWriteback(CodeGenFunction &CGF, 2915 const CallArgList::Writeback &writeback) { 2916 const LValue &srcLV = writeback.Source; 2917 Address srcAddr = srcLV.getAddress(); 2918 assert(!isProvablyNull(srcAddr.getPointer()) && 2919 "shouldn't have writeback for provably null argument"); 2920 2921 llvm::BasicBlock *contBB = nullptr; 2922 2923 // If the argument wasn't provably non-null, we need to null check 2924 // before doing the store. 2925 bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer()); 2926 if (!provablyNonNull) { 2927 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 2928 contBB = CGF.createBasicBlock("icr.done"); 2929 2930 llvm::Value *isNull = 2931 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 2932 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 2933 CGF.EmitBlock(writebackBB); 2934 } 2935 2936 // Load the value to writeback. 2937 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 2938 2939 // Cast it back, in case we're writing an id to a Foo* or something. 2940 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(), 2941 "icr.writeback-cast"); 2942 2943 // Perform the writeback. 2944 2945 // If we have a "to use" value, it's something we need to emit a use 2946 // of. This has to be carefully threaded in: if it's done after the 2947 // release it's potentially undefined behavior (and the optimizer 2948 // will ignore it), and if it happens before the retain then the 2949 // optimizer could move the release there. 2950 if (writeback.ToUse) { 2951 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 2952 2953 // Retain the new value. No need to block-copy here: the block's 2954 // being passed up the stack. 2955 value = CGF.EmitARCRetainNonBlock(value); 2956 2957 // Emit the intrinsic use here. 2958 CGF.EmitARCIntrinsicUse(writeback.ToUse); 2959 2960 // Load the old value (primitively). 2961 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 2962 2963 // Put the new value in place (primitively). 2964 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 2965 2966 // Release the old value. 2967 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 2968 2969 // Otherwise, we can just do a normal lvalue store. 2970 } else { 2971 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 2972 } 2973 2974 // Jump to the continuation block. 2975 if (!provablyNonNull) 2976 CGF.EmitBlock(contBB); 2977 } 2978 2979 static void emitWritebacks(CodeGenFunction &CGF, 2980 const CallArgList &args) { 2981 for (const auto &I : args.writebacks()) 2982 emitWriteback(CGF, I); 2983 } 2984 2985 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, 2986 const CallArgList &CallArgs) { 2987 assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()); 2988 ArrayRef<CallArgList::CallArgCleanup> Cleanups = 2989 CallArgs.getCleanupsToDeactivate(); 2990 // Iterate in reverse to increase the likelihood of popping the cleanup. 2991 for (const auto &I : llvm::reverse(Cleanups)) { 2992 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP); 2993 I.IsActiveIP->eraseFromParent(); 2994 } 2995 } 2996 2997 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { 2998 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) 2999 if (uop->getOpcode() == UO_AddrOf) 3000 return uop->getSubExpr(); 3001 return nullptr; 3002 } 3003 3004 /// Emit an argument that's being passed call-by-writeback. 
That is, 3005 /// we are passing the address of an __autoreleased temporary; it 3006 /// might be copy-initialized with the current value of the given 3007 /// address, but it will definitely be copied out of after the call. 3008 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, 3009 const ObjCIndirectCopyRestoreExpr *CRE) { 3010 LValue srcLV; 3011 3012 // Make an optimistic effort to emit the address as an l-value. 3013 // This can fail if the argument expression is more complicated. 3014 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { 3015 srcLV = CGF.EmitLValue(lvExpr); 3016 3017 // Otherwise, just emit it as a scalar. 3018 } else { 3019 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); 3020 3021 QualType srcAddrType = 3022 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); 3023 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); 3024 } 3025 Address srcAddr = srcLV.getAddress(); 3026 3027 // The dest and src types don't necessarily match in LLVM terms 3028 // because of the crazy ObjC compatibility rules. 3029 3030 llvm::PointerType *destType = 3031 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); 3032 3033 // If the address is a constant null, just pass the appropriate null. 3034 if (isProvablyNull(srcAddr.getPointer())) { 3035 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), 3036 CRE->getType()); 3037 return; 3038 } 3039 3040 // Create the temporary. 3041 Address temp = CGF.CreateTempAlloca(destType->getElementType(), 3042 CGF.getPointerAlign(), 3043 "icr.temp"); 3044 // Loading an l-value can introduce a cleanup if the l-value is __weak, 3045 // and that cleanup will be conditional if we can't prove that the l-value 3046 // isn't null, so we need to register a dominating point so that the cleanups 3047 // system will make valid IR. 3048 CodeGenFunction::ConditionalEvaluation condEval(CGF); 3049 3050 // Zero-initialize it if we're not doing a copy-initialization. 3051 bool shouldCopy = CRE->shouldCopy(); 3052 if (!shouldCopy) { 3053 llvm::Value *null = 3054 llvm::ConstantPointerNull::get( 3055 cast<llvm::PointerType>(destType->getElementType())); 3056 CGF.Builder.CreateStore(null, temp); 3057 } 3058 3059 llvm::BasicBlock *contBB = nullptr; 3060 llvm::BasicBlock *originBB = nullptr; 3061 3062 // If the address is *not* known to be non-null, we need to switch. 3063 llvm::Value *finalArgument; 3064 3065 bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer()); 3066 if (provablyNonNull) { 3067 finalArgument = temp.getPointer(); 3068 } else { 3069 llvm::Value *isNull = 3070 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3071 3072 finalArgument = CGF.Builder.CreateSelect(isNull, 3073 llvm::ConstantPointerNull::get(destType), 3074 temp.getPointer(), "icr.argument"); 3075 3076 // If we need to copy, then the load has to be conditional, which 3077 // means we need control flow. 3078 if (shouldCopy) { 3079 originBB = CGF.Builder.GetInsertBlock(); 3080 contBB = CGF.createBasicBlock("icr.cont"); 3081 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 3082 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 3083 CGF.EmitBlock(copyBB); 3084 condEval.begin(CGF); 3085 } 3086 } 3087 3088 llvm::Value *valueToUse = nullptr; 3089 3090 // Perform a copy if necessary. 
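  // Illustrative shape of the whole write-back argument: for something like
  //   NSError *err;          // a __strong local under ARC
  //   [obj doThing:&err];    // parameter typed NSError * __autoreleasing *
  // we pass the address of the fresh 'icr.temp' slot (copy-initialized just
  // below when shouldCopy is set), and after the call emitWriteback stores
  // the temporary's value back into 'err'.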
3091 if (shouldCopy) { 3092 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 3093 assert(srcRV.isScalar()); 3094 3095 llvm::Value *src = srcRV.getScalarVal(); 3096 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 3097 "icr.cast"); 3098 3099 // Use an ordinary store, not a store-to-lvalue. 3100 CGF.Builder.CreateStore(src, temp); 3101 3102 // If optimization is enabled, and the value was held in a 3103 // __strong variable, we need to tell the optimizer that this 3104 // value has to stay alive until we're doing the store back. 3105 // This is because the temporary is effectively unretained, 3106 // and so otherwise we can violate the high-level semantics. 3107 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 3108 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 3109 valueToUse = src; 3110 } 3111 } 3112 3113 // Finish the control flow if we needed it. 3114 if (shouldCopy && !provablyNonNull) { 3115 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 3116 CGF.EmitBlock(contBB); 3117 3118 // Make a phi for the value to intrinsically use. 3119 if (valueToUse) { 3120 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 3121 "icr.to-use"); 3122 phiToUse->addIncoming(valueToUse, copyBB); 3123 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 3124 originBB); 3125 valueToUse = phiToUse; 3126 } 3127 3128 condEval.end(CGF); 3129 } 3130 3131 args.addWriteback(srcLV, temp, valueToUse); 3132 args.add(RValue::get(finalArgument), CRE->getType()); 3133 } 3134 3135 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 3136 assert(!StackBase && !StackCleanup.isValid()); 3137 3138 // Save the stack. 3139 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 3140 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); 3141 } 3142 3143 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 3144 if (StackBase) { 3145 // Restore the stack after the call. 3146 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 3147 CGF.Builder.CreateCall(F, StackBase); 3148 } 3149 } 3150 3151 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, 3152 SourceLocation ArgLoc, 3153 const FunctionDecl *FD, 3154 unsigned ParmNum) { 3155 if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD) 3156 return; 3157 auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr; 3158 unsigned ArgNo = PVD ? 
PVD->getFunctionScopeIndex() : ParmNum; 3159 auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo); 3160 if (!NNAttr) 3161 return; 3162 SanitizerScope SanScope(this); 3163 assert(RV.isScalar()); 3164 llvm::Value *V = RV.getScalarVal(); 3165 llvm::Value *Cond = 3166 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType())); 3167 llvm::Constant *StaticData[] = { 3168 EmitCheckSourceLocation(ArgLoc), 3169 EmitCheckSourceLocation(NNAttr->getLocation()), 3170 llvm::ConstantInt::get(Int32Ty, ArgNo + 1), 3171 }; 3172 EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute), 3173 "nonnull_arg", StaticData, None); 3174 } 3175 3176 void CodeGenFunction::EmitCallArgs( 3177 CallArgList &Args, ArrayRef<QualType> ArgTypes, 3178 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, 3179 const FunctionDecl *CalleeDecl, unsigned ParamsToSkip) { 3180 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); 3181 3182 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) { 3183 if (CalleeDecl == nullptr || I >= CalleeDecl->getNumParams()) 3184 return; 3185 auto *PS = CalleeDecl->getParamDecl(I)->getAttr<PassObjectSizeAttr>(); 3186 if (PS == nullptr) 3187 return; 3188 3189 const auto &Context = getContext(); 3190 auto SizeTy = Context.getSizeType(); 3191 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); 3192 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T); 3193 Args.add(RValue::get(V), SizeTy); 3194 }; 3195 3196 // We *have* to evaluate arguments from right to left in the MS C++ ABI, 3197 // because arguments are destroyed left to right in the callee. 3198 if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 3199 // Insert a stack save if we're going to need any inalloca args. 3200 bool HasInAllocaArgs = false; 3201 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end(); 3202 I != E && !HasInAllocaArgs; ++I) 3203 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I); 3204 if (HasInAllocaArgs) { 3205 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 3206 Args.allocateArgumentMemory(*this); 3207 } 3208 3209 // Evaluate each argument. 3210 size_t CallArgsStart = Args.size(); 3211 for (int I = ArgTypes.size() - 1; I >= 0; --I) { 3212 CallExpr::const_arg_iterator Arg = ArgRange.begin() + I; 3213 MaybeEmitImplicitObjectSize(I, *Arg); 3214 EmitCallArg(Args, *Arg, ArgTypes[I]); 3215 EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(), 3216 CalleeDecl, ParamsToSkip + I); 3217 } 3218 3219 // Un-reverse the arguments we just evaluated so they match up with the LLVM 3220 // IR function. 
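    // For example, for a call f(a, b, c) the loop above evaluated c, then b,
    // then a, so Args currently holds the new entries in reverse source
    // order; the reverse below restores the a, b, c order that the LLVM IR
    // argument list expects.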
3221 std::reverse(Args.begin() + CallArgsStart, Args.end()); 3222 return; 3223 } 3224 3225 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { 3226 CallExpr::const_arg_iterator Arg = ArgRange.begin() + I; 3227 assert(Arg != ArgRange.end()); 3228 EmitCallArg(Args, *Arg, ArgTypes[I]); 3229 EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(), 3230 CalleeDecl, ParamsToSkip + I); 3231 MaybeEmitImplicitObjectSize(I, *Arg); 3232 } 3233 } 3234 3235 namespace { 3236 3237 struct DestroyUnpassedArg final : EHScopeStack::Cleanup { 3238 DestroyUnpassedArg(Address Addr, QualType Ty) 3239 : Addr(Addr), Ty(Ty) {} 3240 3241 Address Addr; 3242 QualType Ty; 3243 3244 void Emit(CodeGenFunction &CGF, Flags flags) override { 3245 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 3246 assert(!Dtor->isTrivial()); 3247 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 3248 /*Delegating=*/false, Addr); 3249 } 3250 }; 3251 3252 struct DisableDebugLocationUpdates { 3253 CodeGenFunction &CGF; 3254 bool disabledDebugInfo; 3255 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { 3256 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) 3257 CGF.disableDebugInfo(); 3258 } 3259 ~DisableDebugLocationUpdates() { 3260 if (disabledDebugInfo) 3261 CGF.enableDebugInfo(); 3262 } 3263 }; 3264 3265 } // end anonymous namespace 3266 3267 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 3268 QualType type) { 3269 DisableDebugLocationUpdates Dis(*this, E); 3270 if (const ObjCIndirectCopyRestoreExpr *CRE 3271 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 3272 assert(getLangOpts().ObjCAutoRefCount); 3273 assert(getContext().hasSameType(E->getType(), type)); 3274 return emitWritebackArg(*this, args, CRE); 3275 } 3276 3277 assert(type->isReferenceType() == E->isGLValue() && 3278 "reference binding to unmaterialized r-value!"); 3279 3280 if (E->isGLValue()) { 3281 assert(E->getObjectKind() == OK_Ordinary); 3282 return args.add(EmitReferenceBindingToExpr(E), type); 3283 } 3284 3285 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 3286 3287 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 3288 // However, we still have to push an EH-only cleanup in case we unwind before 3289 // we make it to the call. 3290 if (HasAggregateEvalKind && 3291 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 3292 // If we're using inalloca, use the argument memory. Otherwise, use a 3293 // temporary. 3294 AggValueSlot Slot; 3295 if (args.isUsingInAlloca()) 3296 Slot = createPlaceholderSlot(*this, type); 3297 else 3298 Slot = CreateAggTemp(type, "agg.tmp"); 3299 3300 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 3301 bool DestroyedInCallee = 3302 RD && RD->hasNonTrivialDestructor() && 3303 CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default; 3304 if (DestroyedInCallee) 3305 Slot.setExternallyDestructed(); 3306 3307 EmitAggExpr(E, Slot); 3308 RValue RV = Slot.asRValue(); 3309 args.add(RV, type); 3310 3311 if (DestroyedInCallee) { 3312 // Create a no-op GEP between the placeholder and the cleanup so we can 3313 // RAUW it successfully. It also serves as a marker of the first 3314 // instruction where the cleanup is active. 3315 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(), 3316 type); 3317 // This unreachable is a temporary marker which will be removed later. 
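      // (The unreachable becomes the cleanup's IsActiveIP: if the call is
      //  actually emitted, deactivateArgCleanupsBeforeCall(), defined earlier
      //  in this file, deactivates the cleanup at that instruction and then
      //  erases the marker again.)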
3318 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 3319 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 3320 } 3321 return; 3322 } 3323 3324 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 3325 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 3326 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 3327 assert(L.isSimple()); 3328 if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) { 3329 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true); 3330 } else { 3331 // We can't represent a misaligned lvalue in the CallArgList, so copy 3332 // to an aligned temporary now. 3333 Address tmp = CreateMemTemp(type); 3334 EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile()); 3335 args.add(RValue::getAggregate(tmp), type); 3336 } 3337 return; 3338 } 3339 3340 args.add(EmitAnyExprToTemp(E), type); 3341 } 3342 3343 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 3344 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 3345 // implicitly widens null pointer constants that are arguments to varargs 3346 // functions to pointer-sized ints. 3347 if (!getTarget().getTriple().isOSWindows()) 3348 return Arg->getType(); 3349 3350 if (Arg->getType()->isIntegerType() && 3351 getContext().getTypeSize(Arg->getType()) < 3352 getContext().getTargetInfo().getPointerWidth(0) && 3353 Arg->isNullPointerConstant(getContext(), 3354 Expr::NPC_ValueDependentIsNotNull)) { 3355 return getContext().getIntPtrType(); 3356 } 3357 3358 return Arg->getType(); 3359 } 3360 3361 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3362 // optimizer it can aggressively ignore unwind edges. 3363 void 3364 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 3365 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 3366 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 3367 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 3368 CGM.getNoObjCARCExceptionsMetadata()); 3369 } 3370 3371 /// Emits a call to the given no-arguments nounwind runtime function. 3372 llvm::CallInst * 3373 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 3374 const llvm::Twine &name) { 3375 return EmitNounwindRuntimeCall(callee, None, name); 3376 } 3377 3378 /// Emits a call to the given nounwind runtime function. 3379 llvm::CallInst * 3380 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 3381 ArrayRef<llvm::Value*> args, 3382 const llvm::Twine &name) { 3383 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 3384 call->setDoesNotThrow(); 3385 return call; 3386 } 3387 3388 /// Emits a simple call (never an invoke) to the given no-arguments 3389 /// runtime function. 3390 llvm::CallInst * 3391 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 3392 const llvm::Twine &name) { 3393 return EmitRuntimeCall(callee, None, name); 3394 } 3395 3396 // Calls which may throw must have operand bundles indicating which funclet 3397 // they are nested within. 3398 static void 3399 getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad, 3400 SmallVectorImpl<llvm::OperandBundleDef> &BundleList) { 3401 // There is no need for a funclet operand bundle if we aren't inside a 3402 // funclet. 3403 if (!CurrentFuncletPad) 3404 return; 3405 3406 // Skip intrinsics which cannot throw. 
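  // (For example, a nounwind intrinsic such as llvm.lifetime.start emitted
  //  inside a funclet gets no "funclet" bundle, since it can never unwind
  //  into the enclosing pad.)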
3407 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts()); 3408 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) 3409 return; 3410 3411 BundleList.emplace_back("funclet", CurrentFuncletPad); 3412 } 3413 3414 /// Emits a simple call (never an invoke) to the given runtime function. 3415 llvm::CallInst * 3416 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 3417 ArrayRef<llvm::Value*> args, 3418 const llvm::Twine &name) { 3419 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3420 getBundlesForFunclet(callee, CurrentFuncletPad, BundleList); 3421 3422 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList, name); 3423 call->setCallingConv(getRuntimeCC()); 3424 return call; 3425 } 3426 3427 /// Emits a call or invoke to the given noreturn runtime function. 3428 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, 3429 ArrayRef<llvm::Value*> args) { 3430 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3431 getBundlesForFunclet(callee, CurrentFuncletPad, BundleList); 3432 3433 if (getInvokeDest()) { 3434 llvm::InvokeInst *invoke = 3435 Builder.CreateInvoke(callee, 3436 getUnreachableBlock(), 3437 getInvokeDest(), 3438 args, 3439 BundleList); 3440 invoke->setDoesNotReturn(); 3441 invoke->setCallingConv(getRuntimeCC()); 3442 } else { 3443 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); 3444 call->setDoesNotReturn(); 3445 call->setCallingConv(getRuntimeCC()); 3446 Builder.CreateUnreachable(); 3447 } 3448 } 3449 3450 /// Emits a call or invoke instruction to the given nullary runtime function. 3451 llvm::CallSite 3452 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 3453 const Twine &name) { 3454 return EmitRuntimeCallOrInvoke(callee, None, name); 3455 } 3456 3457 /// Emits a call or invoke instruction to the given runtime function. 3458 llvm::CallSite 3459 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 3460 ArrayRef<llvm::Value*> args, 3461 const Twine &name) { 3462 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name); 3463 callSite.setCallingConv(getRuntimeCC()); 3464 return callSite; 3465 } 3466 3467 /// Emits a call or invoke instruction to the given function, depending 3468 /// on the current state of the EH stack. 3469 llvm::CallSite 3470 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 3471 ArrayRef<llvm::Value *> Args, 3472 const Twine &Name) { 3473 llvm::BasicBlock *InvokeDest = getInvokeDest(); 3474 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3475 getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList); 3476 3477 llvm::Instruction *Inst; 3478 if (!InvokeDest) 3479 Inst = Builder.CreateCall(Callee, Args, BundleList, Name); 3480 else { 3481 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 3482 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList, 3483 Name); 3484 EmitBlock(ContBB); 3485 } 3486 3487 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3488 // optimizer it can aggressively ignore unwind edges. 3489 if (CGM.getLangOpts().ObjCAutoRefCount) 3490 AddObjCARCExceptionMetadata(Inst); 3491 3492 return llvm::CallSite(Inst); 3493 } 3494 3495 /// \brief Store a non-aggregate value to an address to initialize it. For 3496 /// initialization, a non-atomic store will be used. 
3497 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, 3498 LValue Dst) { 3499 if (Src.isScalar()) 3500 CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true); 3501 else 3502 CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true); 3503 } 3504 3505 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, 3506 llvm::Value *New) { 3507 DeferredReplacements.push_back(std::make_pair(Old, New)); 3508 } 3509 3510 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 3511 llvm::Value *Callee, 3512 ReturnValueSlot ReturnValue, 3513 const CallArgList &CallArgs, 3514 CGCalleeInfo CalleeInfo, 3515 llvm::Instruction **callOrInvoke) { 3516 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 3517 3518 // Handle struct-return functions by passing a pointer to the 3519 // location that we would like to return into. 3520 QualType RetTy = CallInfo.getReturnType(); 3521 const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); 3522 3523 llvm::FunctionType *IRFuncTy = 3524 cast<llvm::FunctionType>( 3525 cast<llvm::PointerType>(Callee->getType())->getElementType()); 3526 3527 // If we're using inalloca, insert the allocation after the stack save. 3528 // FIXME: Do this earlier rather than hacking it in here! 3529 Address ArgMemory = Address::invalid(); 3530 const llvm::StructLayout *ArgMemoryLayout = nullptr; 3531 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { 3532 ArgMemoryLayout = CGM.getDataLayout().getStructLayout(ArgStruct); 3533 llvm::Instruction *IP = CallArgs.getStackBase(); 3534 llvm::AllocaInst *AI; 3535 if (IP) { 3536 IP = IP->getNextNode(); 3537 AI = new llvm::AllocaInst(ArgStruct, "argmem", IP); 3538 } else { 3539 AI = CreateTempAlloca(ArgStruct, "argmem"); 3540 } 3541 auto Align = CallInfo.getArgStructAlignment(); 3542 AI->setAlignment(Align.getQuantity()); 3543 AI->setUsedWithInAlloca(true); 3544 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); 3545 ArgMemory = Address(AI, Align); 3546 } 3547 3548 // Helper function to drill into the inalloca allocation. 3549 auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address { 3550 auto FieldOffset = 3551 CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex)); 3552 return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset); 3553 }; 3554 3555 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo); 3556 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs()); 3557 3558 // If the call returns a temporary with struct return, create a temporary 3559 // alloca to hold the result, unless one is given to us. 
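  // Schematic illustration (not exact IR): on a target where struct S is
  // returned indirectly, "S r = f();" lowers roughly to
  //
  //   %r = alloca %struct.S
  //   call void @f(%struct.S* sret %r)
  //
  // SRetPtr below names that slot, whether it comes from ReturnValue or from
  // a fresh temporary.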
3560 Address SRetPtr = Address::invalid(); 3561 size_t UnusedReturnSize = 0; 3562 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) { 3563 if (!ReturnValue.isNull()) { 3564 SRetPtr = ReturnValue.getValue(); 3565 } else { 3566 SRetPtr = CreateMemTemp(RetTy); 3567 if (HaveInsertPoint() && ReturnValue.isUnused()) { 3568 uint64_t size = 3569 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy)); 3570 if (EmitLifetimeStart(size, SRetPtr.getPointer())) 3571 UnusedReturnSize = size; 3572 } 3573 } 3574 if (IRFunctionArgs.hasSRetArg()) { 3575 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer(); 3576 } else if (RetAI.isInAlloca()) { 3577 Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex()); 3578 Builder.CreateStore(SRetPtr.getPointer(), Addr); 3579 } 3580 } 3581 3582 Address swiftErrorTemp = Address::invalid(); 3583 Address swiftErrorArg = Address::invalid(); 3584 3585 assert(CallInfo.arg_size() == CallArgs.size() && 3586 "Mismatch between function signature & arguments."); 3587 unsigned ArgNo = 0; 3588 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); 3589 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); 3590 I != E; ++I, ++info_it, ++ArgNo) { 3591 const ABIArgInfo &ArgInfo = info_it->info; 3592 RValue RV = I->RV; 3593 3594 // Insert a padding argument to ensure proper alignment. 3595 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 3596 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 3597 llvm::UndefValue::get(ArgInfo.getPaddingType()); 3598 3599 unsigned FirstIRArg, NumIRArgs; 3600 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 3601 3602 switch (ArgInfo.getKind()) { 3603 case ABIArgInfo::InAlloca: { 3604 assert(NumIRArgs == 0); 3605 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 3606 if (RV.isAggregate()) { 3607 // Replace the placeholder with the appropriate argument slot GEP. 3608 llvm::Instruction *Placeholder = 3609 cast<llvm::Instruction>(RV.getAggregatePointer()); 3610 CGBuilderTy::InsertPoint IP = Builder.saveIP(); 3611 Builder.SetInsertPoint(Placeholder); 3612 Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex()); 3613 Builder.restoreIP(IP); 3614 deferPlaceholderReplacement(Placeholder, Addr.getPointer()); 3615 } else { 3616 // Store the RValue into the argument struct. 3617 Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex()); 3618 unsigned AS = Addr.getType()->getPointerAddressSpace(); 3619 llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS); 3620 // There are some cases where a trivial bitcast is not avoidable. The 3621 // definition of a type later in a translation unit may change it's type 3622 // from {}* to (%struct.foo*)*. 3623 if (Addr.getType() != MemType) 3624 Addr = Builder.CreateBitCast(Addr, MemType); 3625 LValue argLV = MakeAddrLValue(Addr, I->Ty); 3626 EmitInitStoreOfNonAggregate(*this, RV, argLV); 3627 } 3628 break; 3629 } 3630 3631 case ABIArgInfo::Indirect: { 3632 assert(NumIRArgs == 1); 3633 if (RV.isScalar() || RV.isComplex()) { 3634 // Make a temporary alloca to pass the argument. 3635 Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign()); 3636 IRCallArgs[FirstIRArg] = Addr.getPointer(); 3637 3638 LValue argLV = MakeAddrLValue(Addr, I->Ty); 3639 EmitInitStoreOfNonAggregate(*this, RV, argLV); 3640 } else { 3641 // We want to avoid creating an unnecessary temporary+copy here; 3642 // however, we need one in three cases: 3643 // 1. 
If the argument is not byval, and we are required to copy the 3644 // source. (This case doesn't occur on any common architecture.) 3645 // 2. If the argument is byval, RV is not sufficiently aligned, and 3646 // we cannot force it to be sufficiently aligned. 3647 // 3. If the argument is byval, but RV is located in an address space 3648 // different than that of the argument (0). 3649 Address Addr = RV.getAggregateAddress(); 3650 CharUnits Align = ArgInfo.getIndirectAlign(); 3651 const llvm::DataLayout *TD = &CGM.getDataLayout(); 3652 const unsigned RVAddrSpace = Addr.getType()->getAddressSpace(); 3653 const unsigned ArgAddrSpace = 3654 (FirstIRArg < IRFuncTy->getNumParams() 3655 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() 3656 : 0); 3657 if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) || 3658 (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align && 3659 llvm::getOrEnforceKnownAlignment(Addr.getPointer(), 3660 Align.getQuantity(), *TD) 3661 < Align.getQuantity()) || 3662 (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) { 3663 // Create an aligned temporary, and copy to it. 3664 Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign()); 3665 IRCallArgs[FirstIRArg] = AI.getPointer(); 3666 EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified()); 3667 } else { 3668 // Skip the extra memcpy call. 3669 IRCallArgs[FirstIRArg] = Addr.getPointer(); 3670 } 3671 } 3672 break; 3673 } 3674 3675 case ABIArgInfo::Ignore: 3676 assert(NumIRArgs == 0); 3677 break; 3678 3679 case ABIArgInfo::Extend: 3680 case ABIArgInfo::Direct: { 3681 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) && 3682 ArgInfo.getCoerceToType() == ConvertType(info_it->type) && 3683 ArgInfo.getDirectOffset() == 0) { 3684 assert(NumIRArgs == 1); 3685 llvm::Value *V; 3686 if (RV.isScalar()) 3687 V = RV.getScalarVal(); 3688 else 3689 V = Builder.CreateLoad(RV.getAggregateAddress()); 3690 3691 // Implement swifterror by copying into a new swifterror argument. 3692 // We'll write back in the normal path out of the call. 3693 if (CallInfo.getExtParameterInfo(ArgNo).getABI() 3694 == ParameterABI::SwiftErrorResult) { 3695 assert(!swiftErrorTemp.isValid() && "multiple swifterror args"); 3696 3697 QualType pointeeTy = I->Ty->getPointeeType(); 3698 swiftErrorArg = 3699 Address(V, getContext().getTypeAlignInChars(pointeeTy)); 3700 3701 swiftErrorTemp = 3702 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 3703 V = swiftErrorTemp.getPointer(); 3704 cast<llvm::AllocaInst>(V)->setSwiftError(true); 3705 3706 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg); 3707 Builder.CreateStore(errorValue, swiftErrorTemp); 3708 } 3709 3710 // We might have to widen integers, but we should never truncate. 3711 if (ArgInfo.getCoerceToType() != V->getType() && 3712 V->getType()->isIntegerTy()) 3713 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType()); 3714 3715 // If the argument doesn't match, perform a bitcast to coerce it. This 3716 // can happen due to trivial type mismatches. 3717 if (FirstIRArg < IRFuncTy->getNumParams() && 3718 V->getType() != IRFuncTy->getParamType(FirstIRArg)) 3719 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); 3720 3721 IRCallArgs[FirstIRArg] = V; 3722 break; 3723 } 3724 3725 // FIXME: Avoid the conversion through memory if possible. 
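      // What "conversion through memory" means here, schematically: an
      // argument whose coercion type is, say, { i64, i64 } is spilled to (or
      // already lives in) a temporary, and the pieces of that temporary are
      // then reloaded as the scalar IR arguments.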
3726 Address Src = Address::invalid(); 3727 if (RV.isScalar() || RV.isComplex()) { 3728 Src = CreateMemTemp(I->Ty, "coerce"); 3729 LValue SrcLV = MakeAddrLValue(Src, I->Ty); 3730 EmitInitStoreOfNonAggregate(*this, RV, SrcLV); 3731 } else { 3732 Src = RV.getAggregateAddress(); 3733 } 3734 3735 // If the value is offset in memory, apply the offset now. 3736 Src = emitAddressAtOffset(*this, Src, ArgInfo); 3737 3738 // Fast-isel and the optimizer generally like scalar values better than 3739 // FCAs, so we flatten them if this is safe to do for this argument. 3740 llvm::StructType *STy = 3741 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType()); 3742 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 3743 llvm::Type *SrcTy = Src.getType()->getElementType(); 3744 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); 3745 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); 3746 3747 // If the source type is smaller than the destination type of the 3748 // coerce-to logic, copy the source value into a temp alloca the size 3749 // of the destination type to allow loading all of it. The bits past 3750 // the source value are left undef. 3751 if (SrcSize < DstSize) { 3752 Address TempAlloca 3753 = CreateTempAlloca(STy, Src.getAlignment(), 3754 Src.getName() + ".coerce"); 3755 Builder.CreateMemCpy(TempAlloca, Src, SrcSize); 3756 Src = TempAlloca; 3757 } else { 3758 Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy)); 3759 } 3760 3761 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy); 3762 assert(NumIRArgs == STy->getNumElements()); 3763 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 3764 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i)); 3765 Address EltPtr = Builder.CreateStructGEP(Src, i, Offset); 3766 llvm::Value *LI = Builder.CreateLoad(EltPtr); 3767 IRCallArgs[FirstIRArg + i] = LI; 3768 } 3769 } else { 3770 // In the simple case, just pass the coerced loaded value. 3771 assert(NumIRArgs == 1); 3772 IRCallArgs[FirstIRArg] = 3773 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this); 3774 } 3775 3776 break; 3777 } 3778 3779 case ABIArgInfo::CoerceAndExpand: { 3780 auto coercionType = ArgInfo.getCoerceAndExpandType(); 3781 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 3782 3783 llvm::Value *tempSize = nullptr; 3784 Address addr = Address::invalid(); 3785 if (RV.isAggregate()) { 3786 addr = RV.getAggregateAddress(); 3787 } else { 3788 assert(RV.isScalar()); // complex should always just be direct 3789 3790 llvm::Type *scalarType = RV.getScalarVal()->getType(); 3791 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType); 3792 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType); 3793 3794 tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize); 3795 3796 // Materialize to a temporary. 
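        // (A scalar can reach the coerce-and-expand path, e.g. under the
        //  Swift calling convention.  It is spilled here only so the
        //  per-element loads below have an address to work from; the
        //  temporary's lifetime is ended again once the pieces are loaded.)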
3797 addr = CreateTempAlloca(RV.getScalarVal()->getType(), 3798 CharUnits::fromQuantity(std::max(layout->getAlignment(), 3799 scalarAlign))); 3800 EmitLifetimeStart(scalarSize, addr.getPointer()); 3801 3802 Builder.CreateStore(RV.getScalarVal(), addr); 3803 } 3804 3805 addr = Builder.CreateElementBitCast(addr, coercionType); 3806 3807 unsigned IRArgPos = FirstIRArg; 3808 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 3809 llvm::Type *eltType = coercionType->getElementType(i); 3810 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; 3811 Address eltAddr = Builder.CreateStructGEP(addr, i, layout); 3812 llvm::Value *elt = Builder.CreateLoad(eltAddr); 3813 IRCallArgs[IRArgPos++] = elt; 3814 } 3815 assert(IRArgPos == FirstIRArg + NumIRArgs); 3816 3817 if (tempSize) { 3818 EmitLifetimeEnd(tempSize, addr.getPointer()); 3819 } 3820 3821 break; 3822 } 3823 3824 case ABIArgInfo::Expand: 3825 unsigned IRArgPos = FirstIRArg; 3826 ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos); 3827 assert(IRArgPos == FirstIRArg + NumIRArgs); 3828 break; 3829 } 3830 } 3831 3832 if (ArgMemory.isValid()) { 3833 llvm::Value *Arg = ArgMemory.getPointer(); 3834 if (CallInfo.isVariadic()) { 3835 // When passing non-POD arguments by value to variadic functions, we will 3836 // end up with a variadic prototype and an inalloca call site. In such 3837 // cases, we can't do any parameter mismatch checks. Give up and bitcast 3838 // the callee. 3839 unsigned CalleeAS = 3840 cast<llvm::PointerType>(Callee->getType())->getAddressSpace(); 3841 Callee = Builder.CreateBitCast( 3842 Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS)); 3843 } else { 3844 llvm::Type *LastParamTy = 3845 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1); 3846 if (Arg->getType() != LastParamTy) { 3847 #ifndef NDEBUG 3848 // Assert that these structs have equivalent element types. 3849 llvm::StructType *FullTy = CallInfo.getArgStruct(); 3850 llvm::StructType *DeclaredTy = cast<llvm::StructType>( 3851 cast<llvm::PointerType>(LastParamTy)->getElementType()); 3852 assert(DeclaredTy->getNumElements() == FullTy->getNumElements()); 3853 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(), 3854 DE = DeclaredTy->element_end(), 3855 FI = FullTy->element_begin(); 3856 DI != DE; ++DI, ++FI) 3857 assert(*DI == *FI); 3858 #endif 3859 Arg = Builder.CreateBitCast(Arg, LastParamTy); 3860 } 3861 } 3862 assert(IRFunctionArgs.hasInallocaArg()); 3863 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg; 3864 } 3865 3866 if (!CallArgs.getCleanupsToDeactivate().empty()) 3867 deactivateArgCleanupsBeforeCall(*this, CallArgs); 3868 3869 // If the callee is a bitcast of a function to a varargs pointer to function 3870 // type, check to see if we can remove the bitcast. This handles some cases 3871 // with unprototyped functions. 
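  // Concretely (reading the checks below, not asserting a particular source
  // pattern): Callee is only replaced when it is a bitcast constant whose
  // operand is a function @F, the two function types agree in return type,
  // parameter count, and parameter types, and stripping the cast cannot turn
  // a non-variadic call into a variadic one.  The payoff is noted below:
  // always_inline functions can then be inlined even at -O0.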
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
          cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == IRCallArgs.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // Inalloca argument can have different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(Callee->getName(), CallInfo, CalleeInfo,
                             AttributeList, CallingConv,
                             /*AttrOnCallSite=*/true);
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
                                                     AttributeList);

  bool CannotThrow;
  if (currentFunctionUsesSEHTry()) {
    // SEH cares about asynchronous exceptions, so everything can "throw".
    CannotThrow = false;
  } else if (isCleanupPadScope() &&
             EHPersonality::get(*this).isMSVCXXPersonality()) {
    // The MSVC++ personality will implicitly terminate the program if an
    // exception is thrown.  An unwind edge cannot be reached.
    CannotThrow = true;
  } else {
    // Otherwise, nounwind call sites will never throw.
    CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
                                     llvm::Attribute::NoUnwind);
  }
  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();

  SmallVector<llvm::OperandBundleDef, 1> BundleList;
  getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !CS.hasFnAttr(llvm::Attribute::NoInline))
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::AlwaysInline);

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope())
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::NoInline);

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // Insert instrumentation or attach profile metadata at indirect call sites.
  // For more details, see the comment before the definition of
  // IPVK_IndirectCallTarget in InstrProfData.inc.
  if (!CS.getCalledFunction())
    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
                     CS.getInstruction(), Callee);

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    if (UnusedReturnSize)
      EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                      SRetPtr.getPointer());

    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (!CI->getType()->isVoidTy())
    CI->setName("call");

  // Perform the swifterror writeback.
  if (swiftErrorTemp.isValid()) {
    llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
    Builder.CreateStore(errorResult, swiftErrorArg);
  }

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
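  // Schematically (not byte-exact IR), the inalloca argument area is
  // bracketed like this:
  //
  //   %inalloca.save = call i8* @llvm.stacksave()
  //   %argmem = alloca inalloca <{ ... }>
  //   ...
  //   call void @f(<{ ... }>* inalloca %argmem)
  //   call void @llvm.stackrestore(i8* %inalloca.save)
  //
  // allocateArgumentMemory() emitted the save earlier; the call below emits
  // the matching restore.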
4012 CallArgs.freeArgumentMemory(*this); 4013 4014 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) { 4015 const Decl *TargetDecl = CalleeInfo.getCalleeDecl(); 4016 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>()) 4017 Call->setTailCallKind(llvm::CallInst::TCK_NoTail); 4018 } 4019 4020 RValue Ret = [&] { 4021 switch (RetAI.getKind()) { 4022 case ABIArgInfo::CoerceAndExpand: { 4023 auto coercionType = RetAI.getCoerceAndExpandType(); 4024 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 4025 4026 Address addr = SRetPtr; 4027 addr = Builder.CreateElementBitCast(addr, coercionType); 4028 4029 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType()); 4030 bool requiresExtract = isa<llvm::StructType>(CI->getType()); 4031 4032 unsigned unpaddedIndex = 0; 4033 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 4034 llvm::Type *eltType = coercionType->getElementType(i); 4035 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; 4036 Address eltAddr = Builder.CreateStructGEP(addr, i, layout); 4037 llvm::Value *elt = CI; 4038 if (requiresExtract) 4039 elt = Builder.CreateExtractValue(elt, unpaddedIndex++); 4040 else 4041 assert(unpaddedIndex == 0); 4042 Builder.CreateStore(elt, eltAddr); 4043 } 4044 // FALLTHROUGH 4045 } 4046 4047 case ABIArgInfo::InAlloca: 4048 case ABIArgInfo::Indirect: { 4049 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation()); 4050 if (UnusedReturnSize) 4051 EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize), 4052 SRetPtr.getPointer()); 4053 return ret; 4054 } 4055 4056 case ABIArgInfo::Ignore: 4057 // If we are ignoring an argument that had a result, make sure to 4058 // construct the appropriate return value for our caller. 4059 return GetUndefRValue(RetTy); 4060 4061 case ABIArgInfo::Extend: 4062 case ABIArgInfo::Direct: { 4063 llvm::Type *RetIRTy = ConvertType(RetTy); 4064 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) { 4065 switch (getEvaluationKind(RetTy)) { 4066 case TEK_Complex: { 4067 llvm::Value *Real = Builder.CreateExtractValue(CI, 0); 4068 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1); 4069 return RValue::getComplex(std::make_pair(Real, Imag)); 4070 } 4071 case TEK_Aggregate: { 4072 Address DestPtr = ReturnValue.getValue(); 4073 bool DestIsVolatile = ReturnValue.isVolatile(); 4074 4075 if (!DestPtr.isValid()) { 4076 DestPtr = CreateMemTemp(RetTy, "agg.tmp"); 4077 DestIsVolatile = false; 4078 } 4079 BuildAggStore(*this, CI, DestPtr, DestIsVolatile); 4080 return RValue::getAggregate(DestPtr); 4081 } 4082 case TEK_Scalar: { 4083 // If the argument doesn't match, perform a bitcast to coerce it. This 4084 // can happen due to trivial type mismatches. 4085 llvm::Value *V = CI; 4086 if (V->getType() != RetIRTy) 4087 V = Builder.CreateBitCast(V, RetIRTy); 4088 return RValue::get(V); 4089 } 4090 } 4091 llvm_unreachable("bad evaluation kind"); 4092 } 4093 4094 Address DestPtr = ReturnValue.getValue(); 4095 bool DestIsVolatile = ReturnValue.isVolatile(); 4096 4097 if (!DestPtr.isValid()) { 4098 DestPtr = CreateMemTemp(RetTy, "coerce"); 4099 DestIsVolatile = false; 4100 } 4101 4102 // If the value is offset in memory, apply the offset now. 
4103 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI); 4104 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this); 4105 4106 return convertTempToRValue(DestPtr, RetTy, SourceLocation()); 4107 } 4108 4109 case ABIArgInfo::Expand: 4110 llvm_unreachable("Invalid ABI kind for return argument"); 4111 } 4112 4113 llvm_unreachable("Unhandled ABIArgInfo::Kind"); 4114 } (); 4115 4116 const Decl *TargetDecl = CalleeInfo.getCalleeDecl(); 4117 4118 if (Ret.isScalar() && TargetDecl) { 4119 if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) { 4120 llvm::Value *OffsetValue = nullptr; 4121 if (const auto *Offset = AA->getOffset()) 4122 OffsetValue = EmitScalarExpr(Offset); 4123 4124 llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment()); 4125 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment); 4126 EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(), 4127 OffsetValue); 4128 } 4129 } 4130 4131 return Ret; 4132 } 4133 4134 /* VarArg handling */ 4135 4136 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) { 4137 VAListAddr = VE->isMicrosoftABI() 4138 ? EmitMSVAListRef(VE->getSubExpr()) 4139 : EmitVAListRef(VE->getSubExpr()); 4140 QualType Ty = VE->getType(); 4141 if (VE->isMicrosoftABI()) 4142 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty); 4143 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty); 4144 } 4145
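// Illustrative note on the helper above: for a C expression like
//
//   int i = va_arg(ap, int);
//
// EmitVAArg first emits the va_list lvalue (using the Microsoft form when the
// expression was built against the MS ABI), then hands the actual
// advance-and-load lowering to the target's ABIInfo, which is why no
// target-specific logic appears here.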