//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel:
    return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}
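
// For example, for `struct A { void f() const; }`, GetFormalType(f) is the
// canonical `void () const` prototype, while GetThisType(A) is plain `A *`:
// the const method qualifier is deliberately not reflected in the 'this'
// pointer type used for codegen.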
/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT,
                                 const FunctionDecl *FD) {
  // Fill out paramInfos.
  if (FPT->hasExtParameterInfos() || !paramInfos.empty()) {
    assert(paramInfos.size() <= prefix.size());
    auto protoParamInfos = FPT->getExtParameterInfos();
    paramInfos.reserve(prefix.size() + protoParamInfos.size());
    paramInfos.resize(prefix.size());
    paramInfos.append(protoParamInfos.begin(), protoParamInfos.end());
  }

  // Fast path: without a FunctionDecl there can be no pass_object_size
  // attributes, so the parameter types are exactly those of the prototype.
  if (FD == nullptr) {
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  assert(FD->getNumParams() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (FD->getParamDecl(I)->hasAttr<PassObjectSizeAttr>())
      prefix.push_back(CGT.getContext().getSizeType());
  }
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP, FD);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}
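
// For example, appendParameterTypes arranges
// `void f(void *p __attribute__((pass_object_size(0))))` with two
// parameters: the pointer itself, followed by an implicit size_t argument
// carrying the object size.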
/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
///  so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, /*instanceMethod=*/true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
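  // In that case the complete-object variant is the one that constructs the
  // virtual base, so only Ctor_Complete still takes the forwarded parameters
  // (and ABIs without constructor variants always need them).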
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  bool PassParams = true;

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));

    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP, MD);

  TheCXXABI.buildStructorSignature(MD, Type, argTypes);

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static void addExtParameterInfosForCall(
        llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                        const FunctionProtoType *proto,
                                        unsigned prefixArgs,
                                        unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  auto protoInfos = proto->getExtParameterInfos();
  paramInfos.append(protoInfos.begin(), protoInfos.end());

  // Add default infos for the variadic arguments.
  paramInfos.resize(totalArgs);
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}
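
// For example, addExtParameterInfosForCall lays out the infos for a call with
// one prefix argument ('this'), a prototype with two parameters, and one
// trailing variadic argument as [default, proto[0], proto[1], default].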
/// Arrange a call to a C++ constructor, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs, D);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  auto ParamInfos = getExtParameterInfosForCall(FPT.getTypePtr(), 1 + ExtraArgs,
                                                ArgTypes.size());
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
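  // The receiver and selector always come first, mirroring the implicit
  // (id self, SEL _cmd, ...) prefix of the underlying message-send function.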
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, {}, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}
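
// In the MS ABI, a constructor for a class with virtual bases takes an
// implicit "most derived" flag; that is what the IntTy parameter appended
// above represents.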
/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()),
      /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
      proto->getExtInfo(), paramInfos,
      RequiredArgs::forPrototypePlus(proto, 1, nullptr));
}
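
// For example, a free-function call to `int printf(const char *, ...)` with
// four arguments is arranged with required = RequiredArgs(1): the one
// prototype parameter is required, and the rest are treated as variadic.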
const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required) {
  unsigned numRequiredArgs =
      (proto->isVariadic() ? required.getNumRequiredArgs() : args.size());
  unsigned numPrefixArgs = numRequiredArgs - proto->getNumParams();
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}
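
// Every arrange* entry point above ultimately funnels into
// arrangeLLVMFunctionInfo below, which memoizes the result: arranging the
// same signature twice returns the CGFunctionInfo node cached in the
// FunctionInfos folding set rather than recomputing the ABI lowering.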
/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (info.getCC() != CC_Swift) {
    getABIInfo().computeInfo(*FI);
  } else {
    swiftcall::computeABIInfo(CGM, *FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

  void *buffer = operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
      argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.
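// For example, expanding `struct S { int a[2]; _Complex float c; };` yields
// four IR arguments: one i32 per array element, then one float each for the
// real and imaginary parts of the complex member.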

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
}  // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, LV.getAddress(), [&](Address EltAddr) {
          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, RV.getAggregateAddress(), [&](Address EltAddr) {
          RValue EltRV =
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
          ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = RV.getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
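      // e.g. narrowing an i64 0xAABBCCDD11223344 to i32 here yields
      // 0xAABBCCDD, the bytes a narrow load would see at the start of the
      // value in big-endian memory.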
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}



/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}
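
// For example, loading a 3-byte struct through an i32 coerce type takes the
// memcpy path above: only the 3 source bytes are copied into the temporary,
// and the remaining byte of the loaded i32 is undefined.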
// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
        CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
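    // Only the first DstSize bytes of the temporary are copied below, so any
    // trailing bytes of Src beyond the destination's size are dropped.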
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                           CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
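    // When the sret parameter must follow 'this' (as the Microsoft C++ ABI
    // requires for instance methods), it occupies IR slot 1; otherwise it
    // takes the first slot.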
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // Ignore and InAlloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
}  // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
processed?"); 1474 1475 llvm::Type *resultType = nullptr; 1476 const ABIArgInfo &retAI = FI.getReturnInfo(); 1477 switch (retAI.getKind()) { 1478 case ABIArgInfo::Expand: 1479 llvm_unreachable("Invalid ABI kind for return argument"); 1480 1481 case ABIArgInfo::Extend: 1482 case ABIArgInfo::Direct: 1483 resultType = retAI.getCoerceToType(); 1484 break; 1485 1486 case ABIArgInfo::InAlloca: 1487 if (retAI.getInAllocaSRet()) { 1488 // sret things on win32 aren't void, they return the sret pointer. 1489 QualType ret = FI.getReturnType(); 1490 llvm::Type *ty = ConvertType(ret); 1491 unsigned addressSpace = Context.getTargetAddressSpace(ret); 1492 resultType = llvm::PointerType::get(ty, addressSpace); 1493 } else { 1494 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1495 } 1496 break; 1497 1498 case ABIArgInfo::Indirect: 1499 case ABIArgInfo::Ignore: 1500 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1501 break; 1502 1503 case ABIArgInfo::CoerceAndExpand: 1504 resultType = retAI.getUnpaddedCoerceAndExpandType(); 1505 break; 1506 } 1507 1508 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); 1509 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); 1510 1511 // Add type for sret argument. 1512 if (IRFunctionArgs.hasSRetArg()) { 1513 QualType Ret = FI.getReturnType(); 1514 llvm::Type *Ty = ConvertType(Ret); 1515 unsigned AddressSpace = Context.getTargetAddressSpace(Ret); 1516 ArgTypes[IRFunctionArgs.getSRetArgNo()] = 1517 llvm::PointerType::get(Ty, AddressSpace); 1518 } 1519 1520 // Add type for inalloca argument. 1521 if (IRFunctionArgs.hasInallocaArg()) { 1522 auto ArgStruct = FI.getArgStruct(); 1523 assert(ArgStruct); 1524 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); 1525 } 1526 1527 // Add in all of the required arguments. 1528 unsigned ArgNo = 0; 1529 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 1530 ie = it + FI.getNumRequiredArgs(); 1531 for (; it != ie; ++it, ++ArgNo) { 1532 const ABIArgInfo &ArgInfo = it->info; 1533 1534 // Insert a padding type to ensure proper alignment. 1535 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 1536 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 1537 ArgInfo.getPaddingType(); 1538 1539 unsigned FirstIRArg, NumIRArgs; 1540 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1541 1542 switch (ArgInfo.getKind()) { 1543 case ABIArgInfo::Ignore: 1544 case ABIArgInfo::InAlloca: 1545 assert(NumIRArgs == 0); 1546 break; 1547 1548 case ABIArgInfo::Indirect: { 1549 assert(NumIRArgs == 1); 1550 // indirect arguments are always on the stack, which is addr space #0. 1551 llvm::Type *LTy = ConvertTypeForMem(it->type); 1552 ArgTypes[FirstIRArg] = LTy->getPointerTo(); 1553 break; 1554 } 1555 1556 case ABIArgInfo::Extend: 1557 case ABIArgInfo::Direct: { 1558 // Fast-isel and the optimizer generally like scalar values better than 1559 // FCAs, so we flatten them if this is safe to do for this argument. 
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
        *ArgTypesIter++ = EltTy;
      }
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info =
        &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
                                               llvm::AttrBuilder &FuncAttrs,
                                               const FunctionProtoType *FPT) {
  if (!FPT)
    return;

  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
      FPT->isNothrow(Ctx))
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}

void CodeGenModule::ConstructAttributeList(
    StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
    AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;
  bool HasOptnone = false;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // If we have information about the function prototype, we can learn
  // attributes from there.
  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
                                     CalleeInfo.getCalleeFunctionProtoType());

  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();

  bool HasAnyX86InterruptAttr = false;
  // FIXME: handle sseregparm someday...
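  // Rough sketch of the mapping performed below (illustrative, not
  // exhaustive): source-level attributes on the callee become LLVM IR
  // attributes, e.g.
  //
  //   __attribute__((noreturn))      -> noreturn
  //   __attribute__((nothrow))       -> nounwind
  //   __attribute__((const))         -> readnone + nounwind
  //   __attribute__((pure))          -> readonly + nounwind
  //   __attribute__((returns_twice)) -> returns_twice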
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
    if (TargetDecl->hasAttr<ConvergentAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Convergent);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      AddAttributesFromFunctionProtoType(
          getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overloads.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NonNull);

    HasAnyX86InterruptAttr = TargetDecl->hasAttr<AnyX86InterruptAttr>();
    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
    if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
      Optional<unsigned> NumElemsParam;
      // alloc_size args are base-1, 0 means not present.
      if (unsigned N = AllocSize->getNumElemsParam())
        NumElemsParam = N - 1;
      FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam() - 1,
                                 NumElemsParam);
    }
  }

  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
  if (!HasOptnone) {
    if (CodeGenOpts.OptimizeSize)
      FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
    if (CodeGenOpts.OptimizeSize == 2)
      FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  }

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
  if (CodeGenOpts.EnableSegmentedStacks &&
      !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
    FuncAttrs.addAttribute("split-stack");

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls ||
        CodeGenOpts.isNoBuiltinFunc(Name.data()))
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
    if (!CodeGenOpts.TrapFuncName.empty())
      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
  } else {
    // Attributes that should go on the function, but not the call site.
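    // These are mostly string attributes recording codegen options in the IR.
    // For example (illustrative), a function built with frame pointers kept
    // ends up with:
    //
    //   define void @f() #0 { ... }
    //   attributes #0 = { "no-frame-pointer-elim"="true" ... }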
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    }

    bool DisableTailCalls =
        CodeGenOpts.DisableTailCalls || HasAnyX86InterruptAttr ||
        (TargetDecl && TargetDecl->hasAttr<DisableTailCallsAttr>());
    FuncAttrs.addAttribute("disable-tail-calls",
                           llvm::toStringRef(DisableTailCalls));

    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));

    if (!CodeGenOpts.FPDenormalMode.empty())
      FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);

    FuncAttrs.addAttribute("no-trapping-math",
                           llvm::toStringRef(CodeGenOpts.NoTrappingMath));

    // TODO: Are these all needed?
    // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));
    FuncAttrs.addAttribute("no-signed-zeros-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoSignedZeros));
    FuncAttrs.addAttribute(
        "correctly-rounded-divide-sqrt-fp-math",
        llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));

    // TODO: Reciprocal estimate codegen options should apply to instructions?
    std::vector<std::string> &Recips = getTarget().getTargetOpts().Reciprocals;
    if (!Recips.empty())
      FuncAttrs.addAttribute("reciprocal-estimates",
                             llvm::join(Recips.begin(), Recips.end(), ","));

    if (CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("stackrealign");
    if (CodeGenOpts.Backchain)
      FuncAttrs.addAttribute("backchain");

    // Add target-cpu and target-features attributes to functions. If
    // we have a decl for the function and it has a target attribute then
    // parse that and add it to the feature set.
    StringRef TargetCPU = getTarget().getTargetOpts().CPU;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
    if (FD && FD->hasAttr<TargetAttr>()) {
      llvm::StringMap<bool> FeatureMap;
      getFunctionFeatureMap(FeatureMap, FD);

      // Produce the canonical string for this set of features.
      std::vector<std::string> Features;
      for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
                                                 ie = FeatureMap.end();
           it != ie; ++it)
        Features.push_back((it->second ? "+" : "-") + it->first().str());

      // Now add the target-cpu and target-features to the function.
      // While we populated the feature map above, we still need to
      // get and parse the target attribute so we can get the cpu for
      // the function.
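      // For example (illustrative): given
      //
      //   __attribute__((target("avx2"))) void f();
      //
      // the feature map above contains "+avx2" (plus any implied and default
      // features), which is joined into a single "target-features" string
      // attribute below.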
      const auto *TD = FD->getAttr<TargetAttr>();
      TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
      if (ParsedAttr.second != "")
        TargetCPU = ParsedAttr.second;
      if (TargetCPU != "")
        FuncAttrs.addAttribute("target-cpu", TargetCPU);
      if (!Features.empty()) {
        std::sort(Features.begin(), Features.end());
        FuncAttrs.addAttribute(
            "target-features",
            llvm::join(Features.begin(), Features.end(), ","));
      }
    } else {
      // Otherwise just add the existing target cpu and target features to the
      // function.
      std::vector<std::string> &Features = getTarget().getTargetOpts().Features;
      if (TargetCPU != "")
        FuncAttrs.addAttribute("target-cpu", TargetCPU);
      if (!Features.empty()) {
        std::sort(Features.begin(), Features.end());
        FuncAttrs.addAttribute(
            "target-features",
            llvm::join(Features.begin(), Features.end(), ","));
      }
    }
  }

  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
    // Conservatively, mark all functions and calls in CUDA as convergent
    // (meaning, they may call an intrinsically convergent op, such as
    // __syncthreads(), and so can't have certain optimizations applied around
    // them).  LLVM will remove this attribute where it safely can.
    FuncAttrs.addAttribute(llvm::Attribute::Convergent);

    // Exceptions aren't supported in CUDA device code.
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

    // Respect -fcuda-flush-denormals-to-zero.
    if (getLangOpts().CUDADeviceFlushDenormalsToZero)
      FuncAttrs.addAttribute("nvptx-f32ftz", "true");
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    // FALL THROUGH
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);
    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::InAlloca:
  case ABIArgInfo::Indirect: {
    // inalloca and sret disable readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
      .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::CoerceAndExpand:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
    QualType PTy = RefTy->getPointeeType();
    if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
      RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
                                        .getQuantity());
    else if (getContext().getTargetAddressSpace(PTy) == 0)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
  }

  // Attach return attributes.
  if (RetAttrs.hasAttributes()) {
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
  }

  bool hasUsedSRet = false;

  // Attach attributes to sret.
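  // Illustrative sketch: a function returning a large struct indirectly, e.g.
  //
  //   struct Big { char buf[128]; };
  //   struct Big g(void);
  //
  // is lowered so the IR returns void and takes a hidden sret pointer:
  //
  //   define void @g(%struct.Big* sret %agg.result)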
  if (IRFunctionArgs.hasSRetArg()) {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
    hasUsedSRet = true;
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
  }

  // Attach attributes to inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    llvm::AttrBuilder Attrs;
    Attrs.addAttribute(llvm::Attribute::InAlloca);
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
  }

  unsigned ArgNo = 0;
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
                                          E = FI.arg_end();
       I != E; ++I, ++ArgNo) {
    QualType ParamType = I->type;
    const ABIArgInfo &AI = I->info;
    llvm::AttrBuilder Attrs;

    // Add attribute for padding argument, if necessary.
    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
      if (AI.getPaddingInReg())
        PAL.push_back(llvm::AttributeSet::get(
            getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
            llvm::Attribute::InReg));
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
        if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
          Attrs.addAttribute(llvm::Attribute::SExt);
        else
          Attrs.addAttribute(llvm::Attribute::ZExt);
      }
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (ArgNo == 0 && FI.isChainCall())
        Attrs.addAttribute(llvm::Attribute::Nest);
      else if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);
      break;

    case ABIArgInfo::Indirect: {
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attribute::ByVal);

      CharUnits Align = AI.getIndirectAlign();

      // In a byval argument, it is important that the required
      // alignment of the type is honored, as LLVM might be creating a
      // *new* stack object, and needs to know what alignment to give
      // it. (Sometimes it can deduce a sensible alignment on its own,
      // but not if clang decides it must emit a packed struct, or the
      // user specifies increased alignment requirements.)
      //
      // This is different from indirect *not* byval, where the object
      // exists already, and the align attribute is purely
      // informative.
      assert(!Align.isZero());

      // For now, only add this when we have a byval argument.
      // TODO: be less lazy about updating test cases.
      if (AI.getIndirectByVal())
        Attrs.addAlignmentAttr(Align.getQuantity());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
      break;
    }
    case ABIArgInfo::Ignore:
    case ABIArgInfo::Expand:
    case ABIArgInfo::CoerceAndExpand:
      break;

    case ABIArgInfo::InAlloca:
      // inalloca disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
      continue;
    }

    if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
      QualType PTy = RefTy->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
        Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
                                       .getQuantity());
      else if (getContext().getTargetAddressSpace(PTy) == 0)
        Attrs.addAttribute(llvm::Attribute::NonNull);
    }

    switch (FI.getExtParameterInfo(ArgNo).getABI()) {
    case ParameterABI::Ordinary:
      break;

    case ParameterABI::SwiftIndirectResult: {
      // Add 'sret' if we haven't already used it for something, but
      // only if the result is void.
      if (!hasUsedSRet && RetTy->isVoidType()) {
        Attrs.addAttribute(llvm::Attribute::StructRet);
        hasUsedSRet = true;
      }

      // Add 'noalias' in either case.
      Attrs.addAttribute(llvm::Attribute::NoAlias);

      // Add 'dereferenceable' and 'alignment'.
      auto PTy = ParamType->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
        auto info = getContext().getTypeInfoInChars(PTy);
        Attrs.addDereferenceableAttr(info.first.getQuantity());
        Attrs.addAttribute(llvm::Attribute::getWithAlignment(
            getLLVMContext(), info.second.getQuantity()));
      }
      break;
    }

    case ParameterABI::SwiftErrorResult:
      Attrs.addAttribute(llvm::Attribute::SwiftError);
      break;

    case ParameterABI::SwiftContext:
      Attrs.addAttribute(llvm::Attribute::SwiftSelf);
      break;
    }

    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
                                              FirstIRArg + i + 1, Attrs));
    }
  }
  assert(ArgNo == FI.arg_size());

  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(),
                                    llvm::AttributeSet::FunctionIndex,
                                    FuncAttrs));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}

/// Returns the attribute (either parameter attribute, or function
/// attribute), which declares argument ArgNo to be non-null.
static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
                                         QualType ArgType, unsigned ArgNo) {
  // FIXME: __attribute__((nonnull)) can also be applied to:
  //   - references to pointers, where the pointee is known to be
  //     nonnull (apparently a Clang extension)
  //   - transparent unions containing pointers
  // In the former case, LLVM IR cannot represent the constraint. In
  // the latter case, we have no guarantee that the transparent union
  // is in fact passed as a pointer.
  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
    return nullptr;
  // First, check attribute on parameter itself.
  if (PVD) {
    if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
      return ParmNNAttr;
  }
  // Check function attributes.
  if (!FD)
    return nullptr;
  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
    if (NNAttr->isNonNull(ArgNo))
      return NNAttr;
  }
  return nullptr;
}

namespace {
  struct CopyBackSwiftError final : EHScopeStack::Cleanup {
    Address Temp;
    Address Arg;
    CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
      CGF.Builder.CreateStore(errorValue, Arg);
    }
  };
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
    // Naked functions don't have prologues.
    return;

  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getReturnType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
  // Flattened function arguments.
  SmallVector<llvm::Value *, 16> FnArgs;
  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
  for (auto &Arg : Fn->args()) {
    FnArgs.push_back(&Arg);
  }
  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());

  // If we're using inalloca, all the memory arguments are GEPs off of the last
  // parameter, which is a pointer to the complete memory area.
  Address ArgStruct = Address::invalid();
  const llvm::StructLayout *ArgStructLayout = nullptr;
  if (IRFunctionArgs.hasInallocaArg()) {
    ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
    ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
                        FI.getArgStructAlignment());

    assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
  }

  // Name the struct return parameter.
  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
    AI->setName("agg.result");
    AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
                                        llvm::Attribute::NoAlias));
  }

  // Track if we received the parameter as a pointer (indirect, byval, or
  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to
  // copy it into a local alloca for us.
  SmallVector<ParamValue, 16> ArgVals;
  ArgVals.reserve(Args.size());

  // Create a pointer value for every parameter declaration. This usually
  // entails copying one or more LLVM IR arguments into an alloca. Don't push
  // any cleanups or do anything that might unwind. We do that separately, so
  // we can push the cleanups in the correct order for the ABI.
  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgI.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      auto FieldIndex = ArgI.getInAllocaFieldIndex();
      CharUnits FieldOffset =
        CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
      Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
                                          Arg->getName());
      ArgVals.push_back(ParamValue::forIndirect(V));
      break;
    }

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference. All we
        // need to do is realign the value, if requested.
        Address V = ParamAddr;
        if (ArgI.getIndirectRealign()) {
          Address AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
          Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
          Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
          Builder.CreateMemCpy(Dst, Src, SizeVal, false);
          V = AlignedTemp;
        }
        ArgVals.push_back(ParamValue::forIndirect(V));
      } else {
        // Load scalar value from indirect argument.
        llvm::Value *V =
          EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ParamValue::forDirect(V));
      }
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {

      // If we have the trivial case, handle it with no muss and fuss.
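      // The trivial case is a scalar whose ABI type matches its natural IR
      // type, e.g. (illustrative) an 'int' parameter arriving as an i32: the
      // LLVM argument can be used directly after attribute bookkeeping, with
      // no coercion through memory.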
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V = FnArgs[FirstIRArg];
        auto AI = cast<llvm::Argument>(V);

        if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
          if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
                             PVD->getFunctionScopeIndex()))
            AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                AI->getArgNo() + 1,
                                                llvm::Attribute::NonNull));

          QualType OTy = PVD->getOriginalType();
          if (const auto *ArrTy =
              getContext().getAsConstantArrayType(OTy)) {
            // A C99 array parameter declaration with the static keyword also
            // indicates dereferenceability, and if the size is constant we can
            // use the dereferenceable attribute (which requires the size in
            // bytes).
            if (ArrTy->getSizeModifier() == ArrayType::Static) {
              QualType ETy = ArrTy->getElementType();
              uint64_t ArrSize = ArrTy->getSize().getZExtValue();
              if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
                  ArrSize) {
                llvm::AttrBuilder Attrs;
                Attrs.addDereferenceableAttr(
                  getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
                AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                    AI->getArgNo() + 1, Attrs));
              } else if (getContext().getTargetAddressSpace(ETy) == 0) {
                AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                    AI->getArgNo() + 1,
                                                    llvm::Attribute::NonNull));
              }
            }
          } else if (const auto *ArrTy =
                     getContext().getAsVariableArrayType(OTy)) {
            // For C99 VLAs with the static keyword, we don't know the size so
            // we can't use the dereferenceable attribute, but in addrspace(0)
            // we know that it must be nonnull.
            if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
                !getContext().getTargetAddressSpace(ArrTy->getElementType()))
              AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                  AI->getArgNo() + 1,
                                                  llvm::Attribute::NonNull));
          }

          const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
          if (!AVAttr)
            if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
              AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
          if (AVAttr) {
            llvm::Value *AlignmentValue =
              EmitScalarExpr(AVAttr->getAlignment());
            llvm::ConstantInt *AlignmentCI =
              cast<llvm::ConstantInt>(AlignmentValue);
            unsigned Alignment =
              std::min((unsigned) AlignmentCI->getZExtValue(),
                       +llvm::Value::MaximumAlignment);

            llvm::AttrBuilder Attrs;
            Attrs.addAlignmentAttr(Alignment);
            AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                AI->getArgNo() + 1, Attrs));
          }
        }

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                              AI->getArgNo() + 1,
                                              llvm::Attribute::NoAlias));

        // LLVM expects swifterror parameters to be used in very restricted
        // ways.  Copy the value into a less-restricted temporary.
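        // Sketch of the restriction (illustrative): in IR, a swifterror value
        // may only be loaded, stored, or passed as a swifterror call argument.
        // Copying through an ordinary alloca lets the function body treat the
        // parameter like any other local:
        //
        //   %swifterror.temp = alloca i8*          ; ordinary temporary
        //   %0 = load i8*, i8** %arg               ; incoming error value
        //   store i8* %0, i8** %swifterror.temp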
        if (FI.getExtParameterInfo(ArgNo).getABI()
              == ParameterABI::SwiftErrorResult) {
          QualType pointeeTy = Ty->getPointeeType();
          assert(pointeeTy->isPointerType());
          Address temp =
            CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
          Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
          llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
          Builder.CreateStore(incomingErrorValue, temp);
          V = temp.getPointer();

          // Push a cleanup to copy the value back at the end of the function.
          // The convention does not guarantee that the value will be written
          // back if the function exits with an unwind exception.
          EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
        }

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type. Since we are codegening the callee
        // in here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        ArgVals.push_back(ParamValue::forDirect(V));
        break;
      }

      Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
                                     Arg->getName());

      // Pointer to store into.
      Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
          STy->getNumElements() > 1) {
        auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy = Ptr.getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        Address AddrToStoreInto = Address::invalid();
        if (SrcSize <= DstSize) {
          AddrToStoreInto =
            Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
        } else {
          AddrToStoreInto =
            CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
        }

        assert(STy->getNumElements() == NumIRArgs);
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          auto AI = FnArgs[FirstIRArg + i];
          AI->setName(Arg->getName() + ".coerce" + Twine(i));
          auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
          Address EltPtr =
            Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
          Builder.CreateStore(AI, EltPtr);
        }

        if (SrcSize > DstSize) {
          Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
        }

      } else {
        // Simple case, just do a coerced store of the argument into the
        // alloca.
        assert(NumIRArgs == 1);
        auto AI = FnArgs[FirstIRArg];
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
        llvm::Value *V =
          EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ParamValue::forDirect(V));
      } else {
        ArgVals.push_back(ParamValue::forIndirect(Alloca));
      }
      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      // Reconstruct into a temporary.
      Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
      ArgVals.push_back(ParamValue::forIndirect(alloca));

      auto coercionType = ArgI.getCoerceAndExpandType();
      alloca = Builder.CreateElementBitCast(alloca, coercionType);
      auto layout = CGM.getDataLayout().getStructLayout(coercionType);

      unsigned argIndex = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
          continue;

        auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
        auto elt = FnArgs[argIndex++];
        Builder.CreateStore(elt, eltAddr);
      }
      assert(argIndex == FirstIRArg + NumIRArgs);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
      LValue LV = MakeAddrLValue(Alloca, Ty);
      ArgVals.push_back(ParamValue::forIndirect(Alloca));

      auto FnArgIter = FnArgs.begin() + FirstIRArg;
      ExpandTypeFromArgs(Ty, LV, FnArgIter);
      assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
      for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
        auto AI = FnArgs[FirstIRArg + i];
        AI->setName(Arg->getName() + "." + Twine(i));
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      // Initialize the local variable appropriately.
      if (!hasScalarEvaluationKind(Ty)) {
        ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
      } else {
        llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
        ArgVals.push_back(ParamValue::forDirect(U));
      }
      break;
    }
  }

  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)
      EmitParmDecl(*Args[I], ArgVals[I], I + 1);
  } else {
    for (unsigned I = 0, E = Args.size(); I != E; ++I)
      EmitParmDecl(*Args[I], ArgVals[I], I + 1);
  }
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // We must be immediately followed by the cast.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return nullptr;
  if (&BB->back() != result) return nullptr;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
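  // Illustrative sketch of the pattern being matched: the emitted tail of the
  // function looks like
  //
  //   %generator = call i8* @objc_retain(i8* %opnd)   ; possibly surrounded
  //   ...                                             ; by adjacent bitcasts
  //
  // and we are about to autorelease %generator. The retain+autorelease pair
  // is replaced with a single @objc_retainAutoreleaseReturnValue call, or
  // cancelled outright when the generator was
  // @objc_retainAutoreleasedReturnValue.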
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction *, 4> InstsToKill;

  // Look for:
  //  %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return nullptr;

    InstsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return nullptr;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call.  If we can't find it, we can't do this
    // optimization.  But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
    if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
             CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
      InstsToKill.push_back(prev);
    }
  } else {
    return nullptr;
  }

  result = call->getArgOperand(0);
  InstsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    InstsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (auto *I : InstsToKill)
    I->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method =
    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return nullptr;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return nullptr;

  // Look for a retain call.
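  // The shape we hope to see here is (illustrative IR):
  //
  //   %self = load i8*, i8** %self.addr           ; ordinary load of 'self'
  //   %1 = call i8* @objc_retain(i8* %self)       ; possibly through bitcasts
  //   ... %1 flows to the return ...
  //
  // i.e. an immutable 'self' retained purely for the return; the retain and
  // the intervening casts can then be deleted.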
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
    return nullptr;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
    return nullptr;

  // Okay!  Burn it all down.  This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain.  This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}

/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // Check if a User is a store whose pointer operand is the ReturnValue.
  // We are looking for stores to the ReturnValue, not for stores of the
  // ReturnValue to some other location.
  auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
    auto *SI = dyn_cast<llvm::StoreInst>(U);
    if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
      return nullptr;
    // These aren't actually possible for non-coerced returns, and we
    // only care about non-coerced returns on this code path.
    assert(!SI->isAtomic() && !SI->isVolatile());
    return SI;
  };
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
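  // Illustrative sketch: with multiple stores to the slot, we only accept a
  // store that immediately precedes the insertion point, ignoring lifetime
  // intrinsics and their bitcasts, e.g.
  //
  //   store i32 %x, i32* %retval
  //   call void @llvm.lifetime.end(i64 4, i8* %cast)   ; skipped below
  //   <insertion point>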
  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return nullptr;
    llvm::Instruction *I = &IP->back();

    // Skip lifetime markers
    for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
                                            IE = IP->rend();
         II != IE; ++II) {
      if (llvm::IntrinsicInst *Intrinsic =
              dyn_cast<llvm::IntrinsicInst>(&*II)) {
        if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
          const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
          ++II;
          if (II == IE)
            break;
          if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
            continue;
        }
      }
      I = &*II;
      break;
    }

    return GetStoreIfValid(I);
  }

  llvm::StoreInst *store =
    GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
  if (!store) return nullptr;

  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessors chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return nullptr;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         bool EmitRetDbgLoc,
                                         SourceLocation EndLoc) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
    // Naked functions don't have epilogues.
    Builder.CreateUnreachable();
    return;
  }

  // Functions with no result always return void.
  if (!ReturnValue.isValid()) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::InAlloca:
    // Aggregates get evaluated directly into the destination.  Sometimes we
    // need to return the sret value in a register, though.
    assert(hasAggregateEvaluationKind(RetTy));
    if (RetAI.getInAllocaSRet()) {
      llvm::Function::arg_iterator EI = CurFn->arg_end();
      --EI;
      llvm::Value *ArgStruct = &*EI;
      llvm::Value *SRet = Builder.CreateStructGEP(
          nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
      RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
    }
    break;

  case ABIArgInfo::Indirect: {
    auto AI = CurFn->arg_begin();
    if (RetAI.isSRetAfterThis())
      ++AI;
    switch (getEvaluationKind(RetTy)) {
    case TEK_Complex: {
      ComplexPairTy RT =
        EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
      EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
                         /*isInit*/ true);
      break;
    }
    case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
      break;
    case TEK_Scalar:
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
                        MakeNaturalAlignAddrLValue(&*AI, RetTy),
                        /*isInit*/ true);
      break;
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp always will have pointer-to-return-type
      // type, just do a load.
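      // Illustrative before/after of the elision performed below:
      //
      //   store i32 %result, i32* %retval      ; dominating store
      //   %0 = load i32, i32* %retval          ; <- elided
      //   ret i32 %0
      //
      // becomes simply 'ret i32 %result', and the alloca usually disappears.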

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI =
              findDominatingStoreToReturnValue(*this)) {
        // Reuse the debug location from the store unless there is
        // cleanup code to be emitted between the store and return
        // instruction.
        if (EmitRetDbgLoc && !AutoreleaseResult)
          RetDbgLoc = SI->getDebugLoc();
        // Get the stored value and nuke the now-dead store.
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        auto returnValueInst = ReturnValue.getPointer();
        if (returnValueInst->use_empty()) {
          if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
            alloca->eraseFromParent();
            ReturnValue = Address::invalid();
          }
        }

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      // If the value is offset in memory, apply the offset now.
      Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
#ifndef NDEBUG
      // Type::isObjCRetainableType has to be called on a QualType that hasn't
      // been stripped of the typedefs, so we cannot use RetTy here. Get the
      // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
      // from CurCodeDecl or BlockInfo.
      QualType RT;

      if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
        RT = FD->getReturnType();
      else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
        RT = MD->getReturnType();
      else if (isa<BlockDecl>(CurCodeDecl))
        RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
      else
        llvm_unreachable("Unexpected function/method type");

      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RT->isObjCRetainableType());
#endif
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::CoerceAndExpand: {
    auto coercionType = RetAI.getCoerceAndExpandType();
    auto layout = CGM.getDataLayout().getStructLayout(coercionType);

    // Load all of the coerced elements out into results.
    llvm::SmallVector<llvm::Value*, 4> results;
    Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
    for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
      auto coercedEltType = coercionType->getElementType(i);
      if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
        continue;

      auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
      auto elt = Builder.CreateLoad(eltAddr);
      results.push_back(elt);
    }

    // If we have one result, it's the single direct result type.
    if (results.size() == 1) {
      RV = results[0];

    // Otherwise, we need to make a first-class aggregate.
    } else {
      // Construct a return type that lacks padding elements.
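      // For example (illustrative): a coercion type of
      // { i64, [4 x i8], i32 } whose array element is padding yields the
      // unpadded return type { i64, i32 }, rebuilt element-by-element with
      // insertvalue below.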
      llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();

      RV = llvm::UndefValue::get(returnType);
      for (unsigned i = 0, e = results.size(); i != e; ++i) {
        RV = Builder.CreateInsertValue(RV, results[i], i);
      }
    }
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret;
  if (RV) {
    if (CurCodeDecl && SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
      if (auto RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>()) {
        SanitizerScope SanScope(this);
        llvm::Value *Cond = Builder.CreateICmpNE(
            RV, llvm::Constant::getNullValue(RV->getType()));
        llvm::Constant *StaticData[] = {
            EmitCheckSourceLocation(EndLoc),
            EmitCheckSourceLocation(RetNNAttr->getLocation()),
        };
        EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
                  SanitizerHandler::NonnullReturn, StaticData, None);
      }
    }
    Ret = Builder.CreateRet(RV);
  } else {
    Ret = Builder.CreateRetVoid();
  }

  if (RetDbgLoc)
    Ret->setDebugLoc(std::move(RetDbgLoc));
}

static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}

static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
                                          QualType Ty) {
  // FIXME: Generate IR in one pass, rather than going back and fixing up these
  // placeholders.
  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
  llvm::Type *IRPtrTy = IRTy->getPointerTo();
  llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());

  // FIXME: When we generate this IR in one pass, we shouldn't need
  // this win32-specific alignment hack.
  CharUnits Align = CharUnits::fromQuantity(4);
  Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);

  return AggValueSlot::forAddr(Address(Placeholder, Align),
                               Ty.getQualifiers(),
                               AggValueSlot::IsNotDestructed,
                               AggValueSlot::DoesNotNeedGCBarriers,
                               AggValueSlot::IsNotAliased);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param,
                                          SourceLocation loc) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  Address local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
         "cannot emit delegate call arguments for inalloca arguments!");

  // GetAddrOfLocalVar returns a pointer-to-pointer for references,
  // but the argument needs to be the original pointer.
  if (type->isReferenceType()) {
    args.add(RValue::get(Builder.CreateLoad(local)), type);

  // In ARC, move out of consumed arguments so that the release cleanup
  // entered by StartFunction doesn't cause an over-release.  This isn't
  // optimal -O0 code generation, but it should get cleaned up when
  // optimization is enabled.  This also assumes that delegate calls are
  // performed exactly once for a set of arguments, but that should be safe.
  } else if (getLangOpts().ObjCAutoRefCount &&
             param->hasAttr<NSConsumedAttr>() &&
             type->isObjCRetainableType()) {
    llvm::Value *ptr = Builder.CreateLoad(local);
    auto null =
      llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
    Builder.CreateStore(null, local);
    args.add(RValue::get(ptr), type);

  // For the most part, we just need to load the alloca, except that
  // aggregate r-values are actually pointers to temporaries.
  } else {
    args.add(convertTempToRValue(local, type, loc), type);
  }
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  const LValue &srcLV = writeback.Source;
  Address srcAddr = srcLV.getAddress();
  assert(!isProvablyNull(srcAddr.getPointer()) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = nullptr;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = llvm::isKnownNonNull(srcAddr.getPointer());
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull =
      CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to writeback.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
                                    "icr.writeback-cast");

  // Perform the writeback.

  // If we have a "to use" value, it's something we need to emit a use
  // of.  This has to be carefully threaded in: if it's done after the
  // release it's potentially undefined behavior (and the optimizer
  // will ignore it), and if it happens before the retain then the
  // optimizer could move the release there.
  if (writeback.ToUse) {
    assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);

    // Retain the new value.  No need to block-copy here: the block's
    // being passed up the stack.
    value = CGF.EmitARCRetainNonBlock(value);

    // Emit the intrinsic use here.
    CGF.EmitARCIntrinsicUse(writeback.ToUse);

    // Load the old value (primitively).
    llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());

    // Put the new value in place (primitively).
    CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);

    // Release the old value.
    CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());

  // Otherwise, we can just do a normal lvalue store.
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
  }

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (const auto &I : args.writebacks())
    emitWriteback(CGF, I);
}

static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
                                            const CallArgList &CallArgs) {
  assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
    CallArgs.getCleanupsToDeactivate();
  // Iterate in reverse to increase the likelihood of popping the cleanup.
  for (const auto &I : llvm::reverse(Cleanups)) {
    CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
    I.IsActiveIP->eraseFromParent();
  }
}

static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();
  return nullptr;
}

/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of an __autoreleased temporary; it
/// might be copy-initialized with the current value of the given
/// address, but it will definitely be copied out of after the call.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  LValue srcLV;

  // Make an optimistic effort to emit the address as an l-value.
  // This can fail if the argument expression is more complicated.
  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
    srcLV = CGF.EmitLValue(lvExpr);

  // Otherwise, just emit it as a scalar.
  } else {
    Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());

    QualType srcAddrType =
      CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
    srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
  }
  Address srcAddr = srcLV.getAddress();

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr.getPointer())) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  // Create the temporary.
  Address temp = CGF.CreateTempAlloca(destType->getElementType(),
                                      CGF.getPointerAlign(),
                                      "icr.temp");
  // Loading an l-value can introduce a cleanup if the l-value is __weak,
  // and that cleanup will be conditional if we can't prove that the l-value
  // isn't null, so we need to register a dominating point so that the cleanups
  // system will make valid IR.
  CodeGenFunction::ConditionalEvaluation condEval(CGF);

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = nullptr;
  llvm::BasicBlock *originBB = nullptr;

  // If the address is *not* known to be non-null, we need to switch.
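  // Illustrative control flow for the conditional copy below:
  //
  //   %icr.isnull = icmp eq i8** %src, null
  //   %icr.argument = select i1 %icr.isnull, i8** null, i8** %icr.temp
  //   br i1 %icr.isnull, label %icr.cont, label %icr.copy
  // icr.copy:                              ; copy *%src into the temporary
  //   ...
  // icr.cont: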
3101 llvm::Value *finalArgument; 3102 3103 bool provablyNonNull = llvm::isKnownNonNull(srcAddr.getPointer()); 3104 if (provablyNonNull) { 3105 finalArgument = temp.getPointer(); 3106 } else { 3107 llvm::Value *isNull = 3108 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3109 3110 finalArgument = CGF.Builder.CreateSelect(isNull, 3111 llvm::ConstantPointerNull::get(destType), 3112 temp.getPointer(), "icr.argument"); 3113 3114 // If we need to copy, then the load has to be conditional, which 3115 // means we need control flow. 3116 if (shouldCopy) { 3117 originBB = CGF.Builder.GetInsertBlock(); 3118 contBB = CGF.createBasicBlock("icr.cont"); 3119 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 3120 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 3121 CGF.EmitBlock(copyBB); 3122 condEval.begin(CGF); 3123 } 3124 } 3125 3126 llvm::Value *valueToUse = nullptr; 3127 3128 // Perform a copy if necessary. 3129 if (shouldCopy) { 3130 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 3131 assert(srcRV.isScalar()); 3132 3133 llvm::Value *src = srcRV.getScalarVal(); 3134 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 3135 "icr.cast"); 3136 3137 // Use an ordinary store, not a store-to-lvalue. 3138 CGF.Builder.CreateStore(src, temp); 3139 3140 // If optimization is enabled, and the value was held in a 3141 // __strong variable, we need to tell the optimizer that this 3142 // value has to stay alive until we're doing the store back. 3143 // This is because the temporary is effectively unretained, 3144 // and so otherwise we can violate the high-level semantics. 3145 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 3146 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 3147 valueToUse = src; 3148 } 3149 } 3150 3151 // Finish the control flow if we needed it. 3152 if (shouldCopy && !provablyNonNull) { 3153 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 3154 CGF.EmitBlock(contBB); 3155 3156 // Make a phi for the value to intrinsically use. 3157 if (valueToUse) { 3158 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 3159 "icr.to-use"); 3160 phiToUse->addIncoming(valueToUse, copyBB); 3161 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 3162 originBB); 3163 valueToUse = phiToUse; 3164 } 3165 3166 condEval.end(CGF); 3167 } 3168 3169 args.addWriteback(srcLV, temp, valueToUse); 3170 args.add(RValue::get(finalArgument), CRE->getType()); 3171 } 3172 3173 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 3174 assert(!StackBase); 3175 3176 // Save the stack. 3177 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 3178 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); 3179 } 3180 3181 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 3182 if (StackBase) { 3183 // Restore the stack after the call. 3184 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 3185 CGF.Builder.CreateCall(F, StackBase); 3186 } 3187 } 3188 3189 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, 3190 SourceLocation ArgLoc, 3191 const FunctionDecl *FD, 3192 unsigned ParmNum) { 3193 if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD) 3194 return; 3195 auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr; 3196 unsigned ArgNo = PVD ? 
PVD->getFunctionScopeIndex() : ParmNum; 3197 auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo); 3198 if (!NNAttr) 3199 return; 3200 SanitizerScope SanScope(this); 3201 assert(RV.isScalar()); 3202 llvm::Value *V = RV.getScalarVal(); 3203 llvm::Value *Cond = 3204 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType())); 3205 llvm::Constant *StaticData[] = { 3206 EmitCheckSourceLocation(ArgLoc), 3207 EmitCheckSourceLocation(NNAttr->getLocation()), 3208 llvm::ConstantInt::get(Int32Ty, ArgNo + 1), 3209 }; 3210 EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute), 3211 SanitizerHandler::NonnullArg, StaticData, None); 3212 } 3213 3214 void CodeGenFunction::EmitCallArgs( 3215 CallArgList &Args, ArrayRef<QualType> ArgTypes, 3216 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, 3217 const FunctionDecl *CalleeDecl, unsigned ParamsToSkip, 3218 EvaluationOrder Order) { 3219 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); 3220 3221 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) { 3222 if (CalleeDecl == nullptr || I >= CalleeDecl->getNumParams()) 3223 return; 3224 auto *PS = CalleeDecl->getParamDecl(I)->getAttr<PassObjectSizeAttr>(); 3225 if (PS == nullptr) 3226 return; 3227 3228 const auto &Context = getContext(); 3229 auto SizeTy = Context.getSizeType(); 3230 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); 3231 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T); 3232 Args.add(RValue::get(V), SizeTy); 3233 }; 3234 3235 // We *have* to evaluate arguments from right to left in the MS C++ ABI, 3236 // because arguments are destroyed left to right in the callee. As a special 3237 // case, there are certain language constructs that require left-to-right 3238 // evaluation, and in those cases we consider the evaluation order requirement 3239 // to trump the "destruction order is reverse construction order" guarantee. 3240 bool LeftToRight = 3241 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee() 3242 ? Order == EvaluationOrder::ForceLeftToRight 3243 : Order != EvaluationOrder::ForceRightToLeft; 3244 3245 // Insert a stack save if we're going to need any inalloca args. 3246 bool HasInAllocaArgs = false; 3247 if (CGM.getTarget().getCXXABI().isMicrosoft()) { 3248 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end(); 3249 I != E && !HasInAllocaArgs; ++I) 3250 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I); 3251 if (HasInAllocaArgs) { 3252 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 3253 Args.allocateArgumentMemory(*this); 3254 } 3255 } 3256 3257 // Evaluate each argument in the appropriate order. 3258 size_t CallArgsStart = Args.size(); 3259 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { 3260 unsigned Idx = LeftToRight ? I : E - I - 1; 3261 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx; 3262 if (!LeftToRight) MaybeEmitImplicitObjectSize(Idx, *Arg); 3263 EmitCallArg(Args, *Arg, ArgTypes[Idx]); 3264 EmitNonNullArgCheck(Args.back().RV, ArgTypes[Idx], (*Arg)->getExprLoc(), 3265 CalleeDecl, ParamsToSkip + Idx); 3266 if (LeftToRight) MaybeEmitImplicitObjectSize(Idx, *Arg); 3267 } 3268 3269 if (!LeftToRight) { 3270 // Un-reverse the arguments we just evaluated so they match up with the LLVM 3271 // IR function. 
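    // For example, for a call f(a, b, c) evaluated right to left, the args
    // were appended in the order [c, b, a]; reversing the slice that starts
    // at CallArgsStart restores [a, b, c], the order the LLVM IR call
    // expects.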
3272 std::reverse(Args.begin() + CallArgsStart, Args.end()); 3273 } 3274 } 3275 3276 namespace { 3277 3278 struct DestroyUnpassedArg final : EHScopeStack::Cleanup { 3279 DestroyUnpassedArg(Address Addr, QualType Ty) 3280 : Addr(Addr), Ty(Ty) {} 3281 3282 Address Addr; 3283 QualType Ty; 3284 3285 void Emit(CodeGenFunction &CGF, Flags flags) override { 3286 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 3287 assert(!Dtor->isTrivial()); 3288 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 3289 /*Delegating=*/false, Addr); 3290 } 3291 }; 3292 3293 struct DisableDebugLocationUpdates { 3294 CodeGenFunction &CGF; 3295 bool disabledDebugInfo; 3296 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { 3297 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) 3298 CGF.disableDebugInfo(); 3299 } 3300 ~DisableDebugLocationUpdates() { 3301 if (disabledDebugInfo) 3302 CGF.enableDebugInfo(); 3303 } 3304 }; 3305 3306 } // end anonymous namespace 3307 3308 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 3309 QualType type) { 3310 DisableDebugLocationUpdates Dis(*this, E); 3311 if (const ObjCIndirectCopyRestoreExpr *CRE 3312 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 3313 assert(getLangOpts().ObjCAutoRefCount); 3314 assert(getContext().hasSameUnqualifiedType(E->getType(), type)); 3315 return emitWritebackArg(*this, args, CRE); 3316 } 3317 3318 assert(type->isReferenceType() == E->isGLValue() && 3319 "reference binding to unmaterialized r-value!"); 3320 3321 if (E->isGLValue()) { 3322 assert(E->getObjectKind() == OK_Ordinary); 3323 return args.add(EmitReferenceBindingToExpr(E), type); 3324 } 3325 3326 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 3327 3328 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 3329 // However, we still have to push an EH-only cleanup in case we unwind before 3330 // we make it to the call. 3331 if (HasAggregateEvalKind && 3332 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 3333 // If we're using inalloca, use the argument memory. Otherwise, use a 3334 // temporary. 3335 AggValueSlot Slot; 3336 if (args.isUsingInAlloca()) 3337 Slot = createPlaceholderSlot(*this, type); 3338 else 3339 Slot = CreateAggTemp(type, "agg.tmp"); 3340 3341 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 3342 bool DestroyedInCallee = 3343 RD && RD->hasNonTrivialDestructor() && 3344 CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default; 3345 if (DestroyedInCallee) 3346 Slot.setExternallyDestructed(); 3347 3348 EmitAggExpr(E, Slot); 3349 RValue RV = Slot.asRValue(); 3350 args.add(RV, type); 3351 3352 if (DestroyedInCallee) { 3353 // Create a no-op GEP between the placeholder and the cleanup so we can 3354 // RAUW it successfully. It also serves as a marker of the first 3355 // instruction where the cleanup is active. 3356 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(), 3357 type); 3358 // This unreachable is a temporary marker which will be removed later. 
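      // (deactivateArgCleanupsBeforeCall, above, uses this marker as the
      // IP at which the cleanup is deactivated and then erases it from
      // the IR.)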
3359 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 3360 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 3361 } 3362 return; 3363 } 3364 3365 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 3366 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 3367 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 3368 assert(L.isSimple()); 3369 if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) { 3370 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true); 3371 } else { 3372 // We can't represent a misaligned lvalue in the CallArgList, so copy 3373 // to an aligned temporary now. 3374 Address tmp = CreateMemTemp(type); 3375 EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile()); 3376 args.add(RValue::getAggregate(tmp), type); 3377 } 3378 return; 3379 } 3380 3381 args.add(EmitAnyExprToTemp(E), type); 3382 } 3383 3384 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 3385 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 3386 // implicitly widens null pointer constants that are arguments to varargs 3387 // functions to pointer-sized ints. 3388 if (!getTarget().getTriple().isOSWindows()) 3389 return Arg->getType(); 3390 3391 if (Arg->getType()->isIntegerType() && 3392 getContext().getTypeSize(Arg->getType()) < 3393 getContext().getTargetInfo().getPointerWidth(0) && 3394 Arg->isNullPointerConstant(getContext(), 3395 Expr::NPC_ValueDependentIsNotNull)) { 3396 return getContext().getIntPtrType(); 3397 } 3398 3399 return Arg->getType(); 3400 } 3401 3402 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3403 // optimizer it can aggressively ignore unwind edges. 3404 void 3405 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 3406 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 3407 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 3408 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 3409 CGM.getNoObjCARCExceptionsMetadata()); 3410 } 3411 3412 /// Emits a call to the given no-arguments nounwind runtime function. 3413 llvm::CallInst * 3414 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 3415 const llvm::Twine &name) { 3416 return EmitNounwindRuntimeCall(callee, None, name); 3417 } 3418 3419 /// Emits a call to the given nounwind runtime function. 3420 llvm::CallInst * 3421 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 3422 ArrayRef<llvm::Value*> args, 3423 const llvm::Twine &name) { 3424 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 3425 call->setDoesNotThrow(); 3426 return call; 3427 } 3428 3429 /// Emits a simple call (never an invoke) to the given no-arguments 3430 /// runtime function. 3431 llvm::CallInst * 3432 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 3433 const llvm::Twine &name) { 3434 return EmitRuntimeCall(callee, None, name); 3435 } 3436 3437 // Calls which may throw must have operand bundles indicating which funclet 3438 // they are nested within. 3439 static void 3440 getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad, 3441 SmallVectorImpl<llvm::OperandBundleDef> &BundleList) { 3442 // There is no need for a funclet operand bundle if we aren't inside a 3443 // funclet. 3444 if (!CurrentFuncletPad) 3445 return; 3446 3447 // Skip intrinsics which cannot throw. 
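  // (A nounwind intrinsic can never unwind into the enclosing funclet, so
  // the "funclet" bundle would carry no useful information for it.)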
3448 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts()); 3449 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) 3450 return; 3451 3452 BundleList.emplace_back("funclet", CurrentFuncletPad); 3453 } 3454 3455 /// Emits a simple call (never an invoke) to the given runtime function. 3456 llvm::CallInst * 3457 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 3458 ArrayRef<llvm::Value*> args, 3459 const llvm::Twine &name) { 3460 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3461 getBundlesForFunclet(callee, CurrentFuncletPad, BundleList); 3462 3463 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList, name); 3464 call->setCallingConv(getRuntimeCC()); 3465 return call; 3466 } 3467 3468 /// Emits a call or invoke to the given noreturn runtime function. 3469 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, 3470 ArrayRef<llvm::Value*> args) { 3471 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3472 getBundlesForFunclet(callee, CurrentFuncletPad, BundleList); 3473 3474 if (getInvokeDest()) { 3475 llvm::InvokeInst *invoke = 3476 Builder.CreateInvoke(callee, 3477 getUnreachableBlock(), 3478 getInvokeDest(), 3479 args, 3480 BundleList); 3481 invoke->setDoesNotReturn(); 3482 invoke->setCallingConv(getRuntimeCC()); 3483 } else { 3484 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); 3485 call->setDoesNotReturn(); 3486 call->setCallingConv(getRuntimeCC()); 3487 Builder.CreateUnreachable(); 3488 } 3489 } 3490 3491 /// Emits a call or invoke instruction to the given nullary runtime function. 3492 llvm::CallSite 3493 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 3494 const Twine &name) { 3495 return EmitRuntimeCallOrInvoke(callee, None, name); 3496 } 3497 3498 /// Emits a call or invoke instruction to the given runtime function. 3499 llvm::CallSite 3500 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 3501 ArrayRef<llvm::Value*> args, 3502 const Twine &name) { 3503 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name); 3504 callSite.setCallingConv(getRuntimeCC()); 3505 return callSite; 3506 } 3507 3508 /// Emits a call or invoke instruction to the given function, depending 3509 /// on the current state of the EH stack. 3510 llvm::CallSite 3511 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 3512 ArrayRef<llvm::Value *> Args, 3513 const Twine &Name) { 3514 llvm::BasicBlock *InvokeDest = getInvokeDest(); 3515 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3516 getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList); 3517 3518 llvm::Instruction *Inst; 3519 if (!InvokeDest) 3520 Inst = Builder.CreateCall(Callee, Args, BundleList, Name); 3521 else { 3522 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 3523 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList, 3524 Name); 3525 EmitBlock(ContBB); 3526 } 3527 3528 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3529 // optimizer it can aggressively ignore unwind edges. 3530 if (CGM.getLangOpts().ObjCAutoRefCount) 3531 AddObjCARCExceptionMetadata(Inst); 3532 3533 return llvm::CallSite(Inst); 3534 } 3535 3536 /// \brief Store a non-aggregate value to an address to initialize it. For 3537 /// initialization, a non-atomic store will be used. 
3538 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, 3539 LValue Dst) { 3540 if (Src.isScalar()) 3541 CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true); 3542 else 3543 CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true); 3544 } 3545 3546 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, 3547 llvm::Value *New) { 3548 DeferredReplacements.push_back(std::make_pair(Old, New)); 3549 } 3550 3551 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 3552 const CGCallee &Callee, 3553 ReturnValueSlot ReturnValue, 3554 const CallArgList &CallArgs, 3555 llvm::Instruction **callOrInvoke) { 3556 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 3557 3558 assert(Callee.isOrdinary()); 3559 3560 // Handle struct-return functions by passing a pointer to the 3561 // location that we would like to return into. 3562 QualType RetTy = CallInfo.getReturnType(); 3563 const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); 3564 3565 llvm::FunctionType *IRFuncTy = Callee.getFunctionType(); 3566 3567 // 1. Set up the arguments. 3568 3569 // If we're using inalloca, insert the allocation after the stack save. 3570 // FIXME: Do this earlier rather than hacking it in here! 3571 Address ArgMemory = Address::invalid(); 3572 const llvm::StructLayout *ArgMemoryLayout = nullptr; 3573 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { 3574 ArgMemoryLayout = CGM.getDataLayout().getStructLayout(ArgStruct); 3575 llvm::Instruction *IP = CallArgs.getStackBase(); 3576 llvm::AllocaInst *AI; 3577 if (IP) { 3578 IP = IP->getNextNode(); 3579 AI = new llvm::AllocaInst(ArgStruct, "argmem", IP); 3580 } else { 3581 AI = CreateTempAlloca(ArgStruct, "argmem"); 3582 } 3583 auto Align = CallInfo.getArgStructAlignment(); 3584 AI->setAlignment(Align.getQuantity()); 3585 AI->setUsedWithInAlloca(true); 3586 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); 3587 ArgMemory = Address(AI, Align); 3588 } 3589 3590 // Helper function to drill into the inalloca allocation. 3591 auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address { 3592 auto FieldOffset = 3593 CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex)); 3594 return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset); 3595 }; 3596 3597 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo); 3598 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs()); 3599 3600 // If the call returns a temporary with struct return, create a temporary 3601 // alloca to hold the result, unless one is given to us. 
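  // For instance, under a typical indirect ('sret') return convention, a
  // call such as
  //   struct S f();  ...  S s = f();
  // is lowered roughly as
  //   %tmp = alloca %struct.S
  //   call void @f(%struct.S* sret %tmp)
  // and the result is then read back out of %tmp. (Sketch only; the exact
  // IR depends on the target ABI.)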
3602   Address SRetPtr = Address::invalid();
3603   size_t UnusedReturnSize = 0;
3604   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3605     if (!ReturnValue.isNull()) {
3606       SRetPtr = ReturnValue.getValue();
3607     } else {
3608       SRetPtr = CreateMemTemp(RetTy);
3609       if (HaveInsertPoint() && ReturnValue.isUnused()) {
3610         uint64_t size =
3611             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3612         if (EmitLifetimeStart(size, SRetPtr.getPointer()))
3613           UnusedReturnSize = size;
3614       }
3615     }
3616     if (IRFunctionArgs.hasSRetArg()) {
3617       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3618     } else if (RetAI.isInAlloca()) {
3619       Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3620       Builder.CreateStore(SRetPtr.getPointer(), Addr);
3621     }
3622   }
3623 
3624   Address swiftErrorTemp = Address::invalid();
3625   Address swiftErrorArg = Address::invalid();
3626 
3627   // Translate all of the arguments as necessary to match the IR lowering.
3628   assert(CallInfo.arg_size() == CallArgs.size() &&
3629          "Mismatch between function signature & arguments.");
3630   unsigned ArgNo = 0;
3631   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3632   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3633        I != E; ++I, ++info_it, ++ArgNo) {
3634     const ABIArgInfo &ArgInfo = info_it->info;
3635     RValue RV = I->RV;
3636 
3637     // Insert a padding argument to ensure proper alignment.
3638     if (IRFunctionArgs.hasPaddingArg(ArgNo))
3639       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3640           llvm::UndefValue::get(ArgInfo.getPaddingType());
3641 
3642     unsigned FirstIRArg, NumIRArgs;
3643     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3644 
3645     switch (ArgInfo.getKind()) {
3646     case ABIArgInfo::InAlloca: {
3647       assert(NumIRArgs == 0);
3648       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3649       if (RV.isAggregate()) {
3650         // Replace the placeholder with the appropriate argument slot GEP.
3651         llvm::Instruction *Placeholder =
3652             cast<llvm::Instruction>(RV.getAggregatePointer());
3653         CGBuilderTy::InsertPoint IP = Builder.saveIP();
3654         Builder.SetInsertPoint(Placeholder);
3655         Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3656         Builder.restoreIP(IP);
3657         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3658       } else {
3659         // Store the RValue into the argument struct.
3660         Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3661         unsigned AS = Addr.getType()->getPointerAddressSpace();
3662         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3663         // There are some cases where a trivial bitcast is not avoidable. The
3664         // definition of a type later in a translation unit may change its type
3665         // from {}* to (%struct.foo*)*.
3666         if (Addr.getType() != MemType)
3667           Addr = Builder.CreateBitCast(Addr, MemType);
3668         LValue argLV = MakeAddrLValue(Addr, I->Ty);
3669         EmitInitStoreOfNonAggregate(*this, RV, argLV);
3670       }
3671       break;
3672     }
3673 
3674     case ABIArgInfo::Indirect: {
3675       assert(NumIRArgs == 1);
3676       if (RV.isScalar() || RV.isComplex()) {
3677         // Make a temporary alloca to pass the argument.
3678 Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign()); 3679 IRCallArgs[FirstIRArg] = Addr.getPointer(); 3680 3681 LValue argLV = MakeAddrLValue(Addr, I->Ty); 3682 EmitInitStoreOfNonAggregate(*this, RV, argLV); 3683 } else { 3684 // We want to avoid creating an unnecessary temporary+copy here; 3685 // however, we need one in three cases: 3686 // 1. If the argument is not byval, and we are required to copy the 3687 // source. (This case doesn't occur on any common architecture.) 3688 // 2. If the argument is byval, RV is not sufficiently aligned, and 3689 // we cannot force it to be sufficiently aligned. 3690 // 3. If the argument is byval, but RV is located in an address space 3691 // different than that of the argument (0). 3692 Address Addr = RV.getAggregateAddress(); 3693 CharUnits Align = ArgInfo.getIndirectAlign(); 3694 const llvm::DataLayout *TD = &CGM.getDataLayout(); 3695 const unsigned RVAddrSpace = Addr.getType()->getAddressSpace(); 3696 const unsigned ArgAddrSpace = 3697 (FirstIRArg < IRFuncTy->getNumParams() 3698 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() 3699 : 0); 3700 if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) || 3701 (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align && 3702 llvm::getOrEnforceKnownAlignment(Addr.getPointer(), 3703 Align.getQuantity(), *TD) 3704 < Align.getQuantity()) || 3705 (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) { 3706 // Create an aligned temporary, and copy to it. 3707 Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign()); 3708 IRCallArgs[FirstIRArg] = AI.getPointer(); 3709 EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified()); 3710 } else { 3711 // Skip the extra memcpy call. 3712 IRCallArgs[FirstIRArg] = Addr.getPointer(); 3713 } 3714 } 3715 break; 3716 } 3717 3718 case ABIArgInfo::Ignore: 3719 assert(NumIRArgs == 0); 3720 break; 3721 3722 case ABIArgInfo::Extend: 3723 case ABIArgInfo::Direct: { 3724 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) && 3725 ArgInfo.getCoerceToType() == ConvertType(info_it->type) && 3726 ArgInfo.getDirectOffset() == 0) { 3727 assert(NumIRArgs == 1); 3728 llvm::Value *V; 3729 if (RV.isScalar()) 3730 V = RV.getScalarVal(); 3731 else 3732 V = Builder.CreateLoad(RV.getAggregateAddress()); 3733 3734 // Implement swifterror by copying into a new swifterror argument. 3735 // We'll write back in the normal path out of the call. 3736 if (CallInfo.getExtParameterInfo(ArgNo).getABI() 3737 == ParameterABI::SwiftErrorResult) { 3738 assert(!swiftErrorTemp.isValid() && "multiple swifterror args"); 3739 3740 QualType pointeeTy = I->Ty->getPointeeType(); 3741 swiftErrorArg = 3742 Address(V, getContext().getTypeAlignInChars(pointeeTy)); 3743 3744 swiftErrorTemp = 3745 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 3746 V = swiftErrorTemp.getPointer(); 3747 cast<llvm::AllocaInst>(V)->setSwiftError(true); 3748 3749 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg); 3750 Builder.CreateStore(errorValue, swiftErrorTemp); 3751 } 3752 3753 // We might have to widen integers, but we should never truncate. 3754 if (ArgInfo.getCoerceToType() != V->getType() && 3755 V->getType()->isIntegerTy()) 3756 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType()); 3757 3758 // If the argument doesn't match, perform a bitcast to coerce it. This 3759 // can happen due to trivial type mismatches. 
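      // (E.g. an opaque {}* reaching us where the IR parameter type is
      // %struct.foo*, as described in the inalloca case above.)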
3760 if (FirstIRArg < IRFuncTy->getNumParams() && 3761 V->getType() != IRFuncTy->getParamType(FirstIRArg)) 3762 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); 3763 3764 IRCallArgs[FirstIRArg] = V; 3765 break; 3766 } 3767 3768 // FIXME: Avoid the conversion through memory if possible. 3769 Address Src = Address::invalid(); 3770 if (RV.isScalar() || RV.isComplex()) { 3771 Src = CreateMemTemp(I->Ty, "coerce"); 3772 LValue SrcLV = MakeAddrLValue(Src, I->Ty); 3773 EmitInitStoreOfNonAggregate(*this, RV, SrcLV); 3774 } else { 3775 Src = RV.getAggregateAddress(); 3776 } 3777 3778 // If the value is offset in memory, apply the offset now. 3779 Src = emitAddressAtOffset(*this, Src, ArgInfo); 3780 3781 // Fast-isel and the optimizer generally like scalar values better than 3782 // FCAs, so we flatten them if this is safe to do for this argument. 3783 llvm::StructType *STy = 3784 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType()); 3785 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 3786 llvm::Type *SrcTy = Src.getType()->getElementType(); 3787 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); 3788 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); 3789 3790 // If the source type is smaller than the destination type of the 3791 // coerce-to logic, copy the source value into a temp alloca the size 3792 // of the destination type to allow loading all of it. The bits past 3793 // the source value are left undef. 3794 if (SrcSize < DstSize) { 3795 Address TempAlloca 3796 = CreateTempAlloca(STy, Src.getAlignment(), 3797 Src.getName() + ".coerce"); 3798 Builder.CreateMemCpy(TempAlloca, Src, SrcSize); 3799 Src = TempAlloca; 3800 } else { 3801 Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy)); 3802 } 3803 3804 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy); 3805 assert(NumIRArgs == STy->getNumElements()); 3806 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 3807 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i)); 3808 Address EltPtr = Builder.CreateStructGEP(Src, i, Offset); 3809 llvm::Value *LI = Builder.CreateLoad(EltPtr); 3810 IRCallArgs[FirstIRArg + i] = LI; 3811 } 3812 } else { 3813 // In the simple case, just pass the coerced loaded value. 3814 assert(NumIRArgs == 1); 3815 IRCallArgs[FirstIRArg] = 3816 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this); 3817 } 3818 3819 break; 3820 } 3821 3822 case ABIArgInfo::CoerceAndExpand: { 3823 auto coercionType = ArgInfo.getCoerceAndExpandType(); 3824 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 3825 3826 llvm::Value *tempSize = nullptr; 3827 Address addr = Address::invalid(); 3828 if (RV.isAggregate()) { 3829 addr = RV.getAggregateAddress(); 3830 } else { 3831 assert(RV.isScalar()); // complex should always just be direct 3832 3833 llvm::Type *scalarType = RV.getScalarVal()->getType(); 3834 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType); 3835 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType); 3836 3837 tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize); 3838 3839 // Materialize to a temporary. 
3840 addr = CreateTempAlloca(RV.getScalarVal()->getType(), 3841 CharUnits::fromQuantity(std::max(layout->getAlignment(), 3842 scalarAlign))); 3843 EmitLifetimeStart(scalarSize, addr.getPointer()); 3844 3845 Builder.CreateStore(RV.getScalarVal(), addr); 3846 } 3847 3848 addr = Builder.CreateElementBitCast(addr, coercionType); 3849 3850 unsigned IRArgPos = FirstIRArg; 3851 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 3852 llvm::Type *eltType = coercionType->getElementType(i); 3853 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; 3854 Address eltAddr = Builder.CreateStructGEP(addr, i, layout); 3855 llvm::Value *elt = Builder.CreateLoad(eltAddr); 3856 IRCallArgs[IRArgPos++] = elt; 3857 } 3858 assert(IRArgPos == FirstIRArg + NumIRArgs); 3859 3860 if (tempSize) { 3861 EmitLifetimeEnd(tempSize, addr.getPointer()); 3862 } 3863 3864 break; 3865 } 3866 3867 case ABIArgInfo::Expand: 3868 unsigned IRArgPos = FirstIRArg; 3869 ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos); 3870 assert(IRArgPos == FirstIRArg + NumIRArgs); 3871 break; 3872 } 3873 } 3874 3875 llvm::Value *CalleePtr = Callee.getFunctionPointer(); 3876 3877 // If we're using inalloca, set up that argument. 3878 if (ArgMemory.isValid()) { 3879 llvm::Value *Arg = ArgMemory.getPointer(); 3880 if (CallInfo.isVariadic()) { 3881 // When passing non-POD arguments by value to variadic functions, we will 3882 // end up with a variadic prototype and an inalloca call site. In such 3883 // cases, we can't do any parameter mismatch checks. Give up and bitcast 3884 // the callee. 3885 unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace(); 3886 auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS); 3887 CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy); 3888 } else { 3889 llvm::Type *LastParamTy = 3890 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1); 3891 if (Arg->getType() != LastParamTy) { 3892 #ifndef NDEBUG 3893 // Assert that these structs have equivalent element types. 3894 llvm::StructType *FullTy = CallInfo.getArgStruct(); 3895 llvm::StructType *DeclaredTy = cast<llvm::StructType>( 3896 cast<llvm::PointerType>(LastParamTy)->getElementType()); 3897 assert(DeclaredTy->getNumElements() == FullTy->getNumElements()); 3898 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(), 3899 DE = DeclaredTy->element_end(), 3900 FI = FullTy->element_begin(); 3901 DI != DE; ++DI, ++FI) 3902 assert(*DI == *FI); 3903 #endif 3904 Arg = Builder.CreateBitCast(Arg, LastParamTy); 3905 } 3906 } 3907 assert(IRFunctionArgs.hasInallocaArg()); 3908 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg; 3909 } 3910 3911 // 2. Prepare the function pointer. 3912 3913 // If the callee is a bitcast of a non-variadic function to have a 3914 // variadic function pointer type, check to see if we can remove the 3915 // bitcast. This comes up with unprototyped functions. 3916 // 3917 // This makes the IR nicer, but more importantly it ensures that we 3918 // can inline the function at -O0 if it is marked always_inline. 
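  // For example, a call through an unprototyped 'void f();' declaration may
  // be emitted as a call through 'bitcast (void ()* @f to void (...)*)'.
  // If @f's real type agrees with the call site argument-for-argument, we
  // can strip the bitcast and call @f directly.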
3919 auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* { 3920 llvm::FunctionType *CalleeFT = 3921 cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType()); 3922 if (!CalleeFT->isVarArg()) 3923 return Ptr; 3924 3925 llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr); 3926 if (!CE || CE->getOpcode() != llvm::Instruction::BitCast) 3927 return Ptr; 3928 3929 llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0)); 3930 if (!OrigFn) 3931 return Ptr; 3932 3933 llvm::FunctionType *OrigFT = OrigFn->getFunctionType(); 3934 3935 // If the original type is variadic, or if any of the component types 3936 // disagree, we cannot remove the cast. 3937 if (OrigFT->isVarArg() || 3938 OrigFT->getNumParams() != CalleeFT->getNumParams() || 3939 OrigFT->getReturnType() != CalleeFT->getReturnType()) 3940 return Ptr; 3941 3942 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i) 3943 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i)) 3944 return Ptr; 3945 3946 return OrigFn; 3947 }; 3948 CalleePtr = simplifyVariadicCallee(CalleePtr); 3949 3950 // 3. Perform the actual call. 3951 3952 // Deactivate any cleanups that we're supposed to do immediately before 3953 // the call. 3954 if (!CallArgs.getCleanupsToDeactivate().empty()) 3955 deactivateArgCleanupsBeforeCall(*this, CallArgs); 3956 3957 // Assert that the arguments we computed match up. The IR verifier 3958 // will catch this, but this is a common enough source of problems 3959 // during IRGen changes that it's way better for debugging to catch 3960 // it ourselves here. 3961 #ifndef NDEBUG 3962 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg()); 3963 for (unsigned i = 0; i < IRCallArgs.size(); ++i) { 3964 // Inalloca argument can have different type. 3965 if (IRFunctionArgs.hasInallocaArg() && 3966 i == IRFunctionArgs.getInallocaArgNo()) 3967 continue; 3968 if (i < IRFuncTy->getNumParams()) 3969 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i)); 3970 } 3971 #endif 3972 3973 // Compute the calling convention and attributes. 3974 unsigned CallingConv; 3975 CodeGen::AttributeListType AttributeList; 3976 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo, 3977 Callee.getAbstractInfo(), 3978 AttributeList, CallingConv, 3979 /*AttrOnCallSite=*/true); 3980 llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(), 3981 AttributeList); 3982 3983 // Apply some call-site-specific attributes. 3984 // TODO: work this into building the attribute set. 3985 3986 // Apply always_inline to all calls within flatten functions. 3987 // FIXME: should this really take priority over __try, below? 3988 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() && 3989 !(Callee.getAbstractInfo().getCalleeDecl() && 3990 Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) { 3991 Attrs = 3992 Attrs.addAttribute(getLLVMContext(), 3993 llvm::AttributeSet::FunctionIndex, 3994 llvm::Attribute::AlwaysInline); 3995 } 3996 3997 // Disable inlining inside SEH __try blocks. 3998 if (isSEHTryScope()) { 3999 Attrs = 4000 Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex, 4001 llvm::Attribute::NoInline); 4002 } 4003 4004 // Decide whether to use a call or an invoke. 4005 bool CannotThrow; 4006 if (currentFunctionUsesSEHTry()) { 4007 // SEH cares about asynchronous exceptions, so everything can "throw." 
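    // Even a nounwind callee can raise an asynchronous exception (e.g. an
    // access violation), and a __try block must be able to observe it.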
4008     CannotThrow = false;
4009   } else if (isCleanupPadScope() &&
4010              EHPersonality::get(*this).isMSVCXXPersonality()) {
4011     // The MSVC++ personality will implicitly terminate the program if an
4012     // exception is thrown during a cleanup outside of a try/catch.
4013     // We don't need to model anything in IR to get this behavior.
4014     CannotThrow = true;
4015   } else {
4016     // Otherwise, nounwind call sites will never throw.
4017     CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
4018                                      llvm::Attribute::NoUnwind);
4019   }
4020   llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4021 
4022   SmallVector<llvm::OperandBundleDef, 1> BundleList;
4023   getBundlesForFunclet(CalleePtr, CurrentFuncletPad, BundleList);
4024 
4025   // Emit the actual call/invoke instruction.
4026   llvm::CallSite CS;
4027   if (!InvokeDest) {
4028     CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
4029   } else {
4030     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4031     CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
4032                               BundleList);
4033     EmitBlock(Cont);
4034   }
4035   llvm::Instruction *CI = CS.getInstruction();
4036   if (callOrInvoke)
4037     *callOrInvoke = CI;
4038 
4039   // Apply the attributes and calling convention.
4040   CS.setAttributes(Attrs);
4041   CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4042 
4043   // Apply various metadata.
4044 
4045   if (!CI->getType()->isVoidTy())
4046     CI->setName("call");
4047 
4048   // Insert instrumentation or attach profile metadata at indirect call sites.
4049   // For more details, see the comment before the definition of
4050   // IPVK_IndirectCallTarget in InstrProfData.inc.
4051   if (!CS.getCalledFunction())
4052     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4053                      CI, CalleePtr);
4054 
4055   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4056   // optimizer it can aggressively ignore unwind edges.
4057   if (CGM.getLangOpts().ObjCAutoRefCount)
4058     AddObjCARCExceptionMetadata(CI);
4059 
4060   // Suppress tail calls if requested.
4061   if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4062     const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4063     if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4064       Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4065   }
4066 
4067   // 4. Finish the call.
4068 
4069   // If the call doesn't return, finish the basic block and clear the
4070   // insertion point; this allows the rest of IRGen to discard
4071   // unreachable code.
4072   if (CS.doesNotReturn()) {
4073     if (UnusedReturnSize)
4074       EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
4075                       SRetPtr.getPointer());
4076 
4077     Builder.CreateUnreachable();
4078     Builder.ClearInsertionPoint();
4079 
4080     // FIXME: For now, emit a dummy basic block because expr emitters
4081     // generally are not ready to handle emitting expressions at unreachable
4082     // points.
4083     EnsureInsertPoint();
4084 
4085     // Return a reasonable RValue.
4086     return GetUndefRValue(RetTy);
4087   }
4088 
4089   // Perform the swifterror writeback.
4090   if (swiftErrorTemp.isValid()) {
4091     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4092     Builder.CreateStore(errorResult, swiftErrorArg);
4093   }
4094 
4095   // Emit any call-associated writebacks immediately.  Arguably this
4096   // should happen after any return-value munging.
4097   if (CallArgs.hasWritebacks())
4098     emitWritebacks(*this, CallArgs);
4099 
4100   // The stack cleanup for inalloca arguments has to run outside of the
4101   // normal lexical order, so deactivate it and run it manually here.
4102   CallArgs.freeArgumentMemory(*this);
4103 
4104   // Extract the return value.
4105   RValue Ret = [&] {
4106     switch (RetAI.getKind()) {
4107     case ABIArgInfo::CoerceAndExpand: {
4108       auto coercionType = RetAI.getCoerceAndExpandType();
4109       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4110 
4111       Address addr = SRetPtr;
4112       addr = Builder.CreateElementBitCast(addr, coercionType);
4113 
4114       assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4115       bool requiresExtract = isa<llvm::StructType>(CI->getType());
4116 
4117       unsigned unpaddedIndex = 0;
4118       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4119         llvm::Type *eltType = coercionType->getElementType(i);
4120         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4121         Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4122         llvm::Value *elt = CI;
4123         if (requiresExtract)
4124           elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4125         else
4126           assert(unpaddedIndex == 0);
4127         Builder.CreateStore(elt, eltAddr);
4128       }
4129       // FALLTHROUGH
4130     }
4131 
4132     case ABIArgInfo::InAlloca:
4133     case ABIArgInfo::Indirect: {
4134       RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4135       if (UnusedReturnSize)
4136         EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
4137                         SRetPtr.getPointer());
4138       return ret;
4139     }
4140 
4141     case ABIArgInfo::Ignore:
4142       // The return value is ignored at the ABI level; construct an
4143       // appropriate value for our caller anyway.
4144       return GetUndefRValue(RetTy);
4145 
4146     case ABIArgInfo::Extend:
4147     case ABIArgInfo::Direct: {
4148       llvm::Type *RetIRTy = ConvertType(RetTy);
4149       if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4150         switch (getEvaluationKind(RetTy)) {
4151         case TEK_Complex: {
4152           llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4153           llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4154           return RValue::getComplex(std::make_pair(Real, Imag));
4155         }
4156         case TEK_Aggregate: {
4157           Address DestPtr = ReturnValue.getValue();
4158           bool DestIsVolatile = ReturnValue.isVolatile();
4159 
4160           if (!DestPtr.isValid()) {
4161             DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4162             DestIsVolatile = false;
4163           }
4164           BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4165           return RValue::getAggregate(DestPtr);
4166         }
4167         case TEK_Scalar: {
4168           // If the value doesn't match the return IR type, perform a bitcast
4169           // to coerce it.  This can happen due to trivial type mismatches.
4170           llvm::Value *V = CI;
4171           if (V->getType() != RetIRTy)
4172             V = Builder.CreateBitCast(V, RetIRTy);
4173           return RValue::get(V);
4174         }
4175         }
4176         llvm_unreachable("bad evaluation kind");
4177       }
4178 
4179       Address DestPtr = ReturnValue.getValue();
4180       bool DestIsVolatile = ReturnValue.isVolatile();
4181 
4182       if (!DestPtr.isValid()) {
4183         DestPtr = CreateMemTemp(RetTy, "coerce");
4184         DestIsVolatile = false;
4185       }
4186 
4187       // If the value is offset in memory, apply the offset now.
4188 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI); 4189 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this); 4190 4191 return convertTempToRValue(DestPtr, RetTy, SourceLocation()); 4192 } 4193 4194 case ABIArgInfo::Expand: 4195 llvm_unreachable("Invalid ABI kind for return argument"); 4196 } 4197 4198 llvm_unreachable("Unhandled ABIArgInfo::Kind"); 4199 } (); 4200 4201 // Emit the assume_aligned check on the return value. 4202 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl(); 4203 if (Ret.isScalar() && TargetDecl) { 4204 if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) { 4205 llvm::Value *OffsetValue = nullptr; 4206 if (const auto *Offset = AA->getOffset()) 4207 OffsetValue = EmitScalarExpr(Offset); 4208 4209 llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment()); 4210 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment); 4211 EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(), 4212 OffsetValue); 4213 } 4214 } 4215 4216 return Ret; 4217 } 4218 4219 /* VarArg handling */ 4220 4221 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) { 4222 VAListAddr = VE->isMicrosoftABI() 4223 ? EmitMSVAListRef(VE->getSubExpr()) 4224 : EmitVAListRef(VE->getSubExpr()); 4225 QualType Ty = VE->getType(); 4226 if (VE->isMicrosoftABI()) 4227 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty); 4228 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty); 4229 } 4230