//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(
    const CodeGenTypes &CGT, SmallVectorImpl<CanQualType> &prefix,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    CanQual<FunctionProtoType> FPT, const FunctionDecl *FD) {
  // Fill out paramInfos.
  if (FPT->hasExtParameterInfos() || !paramInfos.empty()) {
    assert(paramInfos.size() <= prefix.size());
    auto protoParamInfos = FPT->getExtParameterInfos();
    paramInfos.reserve(prefix.size() + protoParamInfos.size());
    paramInfos.resize(prefix.size());
    paramInfos.append(protoParamInfos.begin(), protoParamInfos.end());
  }

  // Fast path: unknown target.
  if (FD == nullptr) {
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  assert(FD->getNumParams() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (FD->getParamDecl(I)->hasAttr<PassObjectSizeAttr>())
      prefix.push_back(CGT.getContext().getSizeType());
  }
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP, FD);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, /*instanceMethod=*/true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
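  //
  // For illustration (hypothetical example): given
  //   struct A { A(int); };
  //   struct B : virtual A { using A::A; };
  // the base-object variant of B's inheriting constructor does not construct
  // the virtual base A, so the int parameter need not be forwarded to it;
  // only the complete-object variant needs it (assuming an ABI with
  // constructor variants, such as Itanium).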
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  bool PassParams = true;

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));

    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP, MD);

  TheCXXABI.buildStructorSignature(MD, Type, argTypes);

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static void addExtParameterInfosForCall(
    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  auto protoInfos = proto->getExtParameterInfos();
  paramInfos.append(protoInfos.begin(), protoInfos.end());

  // Add default infos for the variadic arguments.
  paramInfos.resize(totalArgs);
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ constructor, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs, D);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  auto ParamInfos = getExtParameterInfosForCall(FPT.getTypePtr(), 1 + ExtraArgs,
                                                ArgTypes.size());
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, {}, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()),
      /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
      proto->getExtInfo(), paramInfos,
      RequiredArgs::forPrototypePlus(proto, 1, nullptr));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required) {
  unsigned numRequiredArgs =
      (proto->isVariadic() ? required.getNumRequiredArgs() : args.size());
  unsigned numPrefixArgs = numRequiredArgs - proto->getNumParams();
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &CodeGenTypes::arrangeLLVMFunctionInfo(
    CanQualType resultType, bool instanceMethod, bool chainCall,
    ArrayRef<CanQualType> argTypes, FunctionType::ExtInfo info,
    ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
    RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (info.getCC() != CC_Swift) {
    getABIInfo().computeInfo(*FI);
  } else {
    swiftcall::computeABIInfo(CGM, *FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.
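//
// For illustration, a hypothetical type
//   struct S { int a[2]; _Complex float c; };
// expands to four IR arguments: i32, i32 (the array elements, assuming a
// 32-bit int) followed by float, float (the real and imaginary parts), as
// computed by getTypeExpansion/getExpandedTypes below.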
// Specifies how a QualType passed with ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
}  // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases: all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this,
                              CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
                              [&](Address EltAddr) {
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = RV.getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
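  // For illustration: if the caller only guarantees 4-byte alignment but the
  // data layout prefers 8 bytes for Ty, the temporary is created with 8-byte
  // alignment.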
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
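      // For illustration: truncating the i32 value 0xAABBCCDD to i16 on a
      // big-endian target yields 0xAABB, exactly what loading the first two
      // bytes of the stored i32 would produce.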
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
        CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(
        addr, CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of the LLVM IR function corresponding to a single Clang
  /// argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns the index of the first IR argument corresponding to ArgNo, and
  /// their quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ?
                                1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
}  // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted &&
processed?"); 1470 1471 llvm::Type *resultType = nullptr; 1472 const ABIArgInfo &retAI = FI.getReturnInfo(); 1473 switch (retAI.getKind()) { 1474 case ABIArgInfo::Expand: 1475 llvm_unreachable("Invalid ABI kind for return argument"); 1476 1477 case ABIArgInfo::Extend: 1478 case ABIArgInfo::Direct: 1479 resultType = retAI.getCoerceToType(); 1480 break; 1481 1482 case ABIArgInfo::InAlloca: 1483 if (retAI.getInAllocaSRet()) { 1484 // sret things on win32 aren't void, they return the sret pointer. 1485 QualType ret = FI.getReturnType(); 1486 llvm::Type *ty = ConvertType(ret); 1487 unsigned addressSpace = Context.getTargetAddressSpace(ret); 1488 resultType = llvm::PointerType::get(ty, addressSpace); 1489 } else { 1490 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1491 } 1492 break; 1493 1494 case ABIArgInfo::Indirect: 1495 case ABIArgInfo::Ignore: 1496 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1497 break; 1498 1499 case ABIArgInfo::CoerceAndExpand: 1500 resultType = retAI.getUnpaddedCoerceAndExpandType(); 1501 break; 1502 } 1503 1504 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); 1505 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); 1506 1507 // Add type for sret argument. 1508 if (IRFunctionArgs.hasSRetArg()) { 1509 QualType Ret = FI.getReturnType(); 1510 llvm::Type *Ty = ConvertType(Ret); 1511 unsigned AddressSpace = Context.getTargetAddressSpace(Ret); 1512 ArgTypes[IRFunctionArgs.getSRetArgNo()] = 1513 llvm::PointerType::get(Ty, AddressSpace); 1514 } 1515 1516 // Add type for inalloca argument. 1517 if (IRFunctionArgs.hasInallocaArg()) { 1518 auto ArgStruct = FI.getArgStruct(); 1519 assert(ArgStruct); 1520 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); 1521 } 1522 1523 // Add in all of the required arguments. 1524 unsigned ArgNo = 0; 1525 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 1526 ie = it + FI.getNumRequiredArgs(); 1527 for (; it != ie; ++it, ++ArgNo) { 1528 const ABIArgInfo &ArgInfo = it->info; 1529 1530 // Insert a padding type to ensure proper alignment. 1531 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 1532 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 1533 ArgInfo.getPaddingType(); 1534 1535 unsigned FirstIRArg, NumIRArgs; 1536 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1537 1538 switch (ArgInfo.getKind()) { 1539 case ABIArgInfo::Ignore: 1540 case ABIArgInfo::InAlloca: 1541 assert(NumIRArgs == 0); 1542 break; 1543 1544 case ABIArgInfo::Indirect: { 1545 assert(NumIRArgs == 1); 1546 // indirect arguments are always on the stack, which is addr space #0. 1547 llvm::Type *LTy = ConvertTypeForMem(it->type); 1548 ArgTypes[FirstIRArg] = LTy->getPointerTo(); 1549 break; 1550 } 1551 1552 case ABIArgInfo::Extend: 1553 case ABIArgInfo::Direct: { 1554 // Fast-isel and the optimizer generally like scalar values better than 1555 // FCAs, so we flatten them if this is safe to do for this argument. 
1556 llvm::Type *argType = ArgInfo.getCoerceToType();
1557 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1558 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1559 assert(NumIRArgs == st->getNumElements());
1560 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1561 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1562 } else {
1563 assert(NumIRArgs == 1);
1564 ArgTypes[FirstIRArg] = argType;
1565 }
1566 break;
1567 }
1568
1569 case ABIArgInfo::CoerceAndExpand: {
1570 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1571 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1572 *ArgTypesIter++ = EltTy;
1573 }
1574 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1575 break;
1576 }
1577
1578 case ABIArgInfo::Expand:
1579 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1580 getExpandedTypes(it->type, ArgTypesIter);
1581 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1582 break;
1583 }
1584 }
1585
1586 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1587 assert(Erased && "Not in set?");
1588
1589 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1590 }
1591
1592 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1593 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1594 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1595
1596 if (!isFuncTypeConvertible(FPT))
1597 return llvm::StructType::get(getLLVMContext());
1598
1599 const CGFunctionInfo *Info;
1600 if (isa<CXXDestructorDecl>(MD))
1601 Info =
1602 &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
1603 else
1604 Info = &arrangeCXXMethodDeclaration(MD);
1605 return GetFunctionType(*Info);
1606 }
1607
1608 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1609 llvm::AttrBuilder &FuncAttrs,
1610 const FunctionProtoType *FPT) {
1611 if (!FPT)
1612 return;
1613
1614 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1615 FPT->isNothrow(Ctx))
1616 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1617 }
1618
1619 void CodeGenModule::ConstructAttributeList(
1620 StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1621 AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
1622 llvm::AttrBuilder FuncAttrs;
1623 llvm::AttrBuilder RetAttrs;
1624 bool HasOptnone = false;
1625
1626 CallingConv = FI.getEffectiveCallingConvention();
1627
1628 if (FI.isNoReturn())
1629 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1630
1631 // If we have information about the function prototype, we can learn
1632 // attributes from there.
1633 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1634 CalleeInfo.getCalleeFunctionProtoType());
1635
1636 const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1637
1638 bool HasAnyX86InterruptAttr = false;
1639 // FIXME: handle sseregparm someday...
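// Rough illustration of the decl-based attributes handled below:
//   __attribute__((nothrow))         -> nounwind
//   __attribute__((const))           -> readnone + nounwind
//   __attribute__((pure))            -> readonly + nounwind
//   __attribute__((returns_nonnull)) -> nonnull on the return value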
1640 if (TargetDecl) { 1641 if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) 1642 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice); 1643 if (TargetDecl->hasAttr<NoThrowAttr>()) 1644 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1645 if (TargetDecl->hasAttr<NoReturnAttr>()) 1646 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1647 if (TargetDecl->hasAttr<NoDuplicateAttr>()) 1648 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate); 1649 1650 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { 1651 AddAttributesFromFunctionProtoType( 1652 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>()); 1653 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function. 1654 // These attributes are not inherited by overloads. 1655 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn); 1656 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual())) 1657 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1658 } 1659 1660 // 'const', 'pure' and 'noalias' attributed functions are also nounwind. 1661 if (TargetDecl->hasAttr<ConstAttr>()) { 1662 FuncAttrs.addAttribute(llvm::Attribute::ReadNone); 1663 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1664 } else if (TargetDecl->hasAttr<PureAttr>()) { 1665 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly); 1666 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1667 } else if (TargetDecl->hasAttr<NoAliasAttr>()) { 1668 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly); 1669 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1670 } 1671 if (TargetDecl->hasAttr<RestrictAttr>()) 1672 RetAttrs.addAttribute(llvm::Attribute::NoAlias); 1673 if (TargetDecl->hasAttr<ReturnsNonNullAttr>()) 1674 RetAttrs.addAttribute(llvm::Attribute::NonNull); 1675 1676 HasAnyX86InterruptAttr = TargetDecl->hasAttr<AnyX86InterruptAttr>(); 1677 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>(); 1678 } 1679 1680 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. 1681 if (!HasOptnone) { 1682 if (CodeGenOpts.OptimizeSize) 1683 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); 1684 if (CodeGenOpts.OptimizeSize == 2) 1685 FuncAttrs.addAttribute(llvm::Attribute::MinSize); 1686 } 1687 1688 if (CodeGenOpts.DisableRedZone) 1689 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); 1690 if (CodeGenOpts.NoImplicitFloat) 1691 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); 1692 if (CodeGenOpts.EnableSegmentedStacks && 1693 !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>())) 1694 FuncAttrs.addAttribute("split-stack"); 1695 1696 if (AttrOnCallSite) { 1697 // Attributes that should go on the call site only. 1698 if (!CodeGenOpts.SimplifyLibCalls || 1699 CodeGenOpts.isNoBuiltinFunc(Name.data())) 1700 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); 1701 if (!CodeGenOpts.TrapFuncName.empty()) 1702 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName); 1703 } else { 1704 // Attributes that should go on the function, but not the call site. 
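// Roughly: the call-site-only attributes above are things like nobuiltin and
// "trap-func-name", while the function-only attributes below are mostly
// string attributes the backend reads off the function itself, e.g.
// "no-frame-pointer-elim", "disable-tail-calls", "target-cpu" and
// "target-features".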
1705 if (!CodeGenOpts.DisableFPElim) { 1706 FuncAttrs.addAttribute("no-frame-pointer-elim", "false"); 1707 } else if (CodeGenOpts.OmitLeafFramePointer) { 1708 FuncAttrs.addAttribute("no-frame-pointer-elim", "false"); 1709 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf"); 1710 } else { 1711 FuncAttrs.addAttribute("no-frame-pointer-elim", "true"); 1712 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf"); 1713 } 1714 1715 bool DisableTailCalls = 1716 CodeGenOpts.DisableTailCalls || HasAnyX86InterruptAttr || 1717 (TargetDecl && TargetDecl->hasAttr<DisableTailCallsAttr>()); 1718 FuncAttrs.addAttribute( 1719 "disable-tail-calls", 1720 llvm::toStringRef(DisableTailCalls)); 1721 1722 FuncAttrs.addAttribute("less-precise-fpmad", 1723 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD)); 1724 FuncAttrs.addAttribute("no-infs-fp-math", 1725 llvm::toStringRef(CodeGenOpts.NoInfsFPMath)); 1726 FuncAttrs.addAttribute("no-nans-fp-math", 1727 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath)); 1728 FuncAttrs.addAttribute("unsafe-fp-math", 1729 llvm::toStringRef(CodeGenOpts.UnsafeFPMath)); 1730 FuncAttrs.addAttribute("use-soft-float", 1731 llvm::toStringRef(CodeGenOpts.SoftFloat)); 1732 FuncAttrs.addAttribute("stack-protector-buffer-size", 1733 llvm::utostr(CodeGenOpts.SSPBufferSize)); 1734 1735 if (CodeGenOpts.StackRealignment) 1736 FuncAttrs.addAttribute("stackrealign"); 1737 if (CodeGenOpts.Backchain) 1738 FuncAttrs.addAttribute("backchain"); 1739 1740 // Add target-cpu and target-features attributes to functions. If 1741 // we have a decl for the function and it has a target attribute then 1742 // parse that and add it to the feature set. 1743 StringRef TargetCPU = getTarget().getTargetOpts().CPU; 1744 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl); 1745 if (FD && FD->hasAttr<TargetAttr>()) { 1746 llvm::StringMap<bool> FeatureMap; 1747 getFunctionFeatureMap(FeatureMap, FD); 1748 1749 // Produce the canonical string for this set of features. 1750 std::vector<std::string> Features; 1751 for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(), 1752 ie = FeatureMap.end(); 1753 it != ie; ++it) 1754 Features.push_back((it->second ? "+" : "-") + it->first().str()); 1755 1756 // Now add the target-cpu and target-features to the function. 1757 // While we populated the feature map above, we still need to 1758 // get and parse the target attribute so we can get the cpu for 1759 // the function. 1760 const auto *TD = FD->getAttr<TargetAttr>(); 1761 TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse(); 1762 if (ParsedAttr.second != "") 1763 TargetCPU = ParsedAttr.second; 1764 if (TargetCPU != "") 1765 FuncAttrs.addAttribute("target-cpu", TargetCPU); 1766 if (!Features.empty()) { 1767 std::sort(Features.begin(), Features.end()); 1768 FuncAttrs.addAttribute( 1769 "target-features", 1770 llvm::join(Features.begin(), Features.end(), ",")); 1771 } 1772 } else { 1773 // Otherwise just add the existing target cpu and target features to the 1774 // function. 
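// Illustrative example of the canonical form built below: individual entries
// such as "+sse4.2" and "-avx" are sorted and joined into a single
// "target-features"="+sse4.2,-avx" string attribute (the feature names here
// are only examples).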
1775 std::vector<std::string> &Features = getTarget().getTargetOpts().Features; 1776 if (TargetCPU != "") 1777 FuncAttrs.addAttribute("target-cpu", TargetCPU); 1778 if (!Features.empty()) { 1779 std::sort(Features.begin(), Features.end()); 1780 FuncAttrs.addAttribute( 1781 "target-features", 1782 llvm::join(Features.begin(), Features.end(), ",")); 1783 } 1784 } 1785 } 1786 1787 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { 1788 // Conservatively, mark all functions and calls in CUDA as convergent 1789 // (meaning, they may call an intrinsically convergent op, such as 1790 // __syncthreads(), and so can't have certain optimizations applied around 1791 // them). LLVM will remove this attribute where it safely can. 1792 FuncAttrs.addAttribute(llvm::Attribute::Convergent); 1793 1794 // Respect -fcuda-flush-denormals-to-zero. 1795 if (getLangOpts().CUDADeviceFlushDenormalsToZero) 1796 FuncAttrs.addAttribute("nvptx-f32ftz", "true"); 1797 } 1798 1799 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); 1800 1801 QualType RetTy = FI.getReturnType(); 1802 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1803 switch (RetAI.getKind()) { 1804 case ABIArgInfo::Extend: 1805 if (RetTy->hasSignedIntegerRepresentation()) 1806 RetAttrs.addAttribute(llvm::Attribute::SExt); 1807 else if (RetTy->hasUnsignedIntegerRepresentation()) 1808 RetAttrs.addAttribute(llvm::Attribute::ZExt); 1809 // FALL THROUGH 1810 case ABIArgInfo::Direct: 1811 if (RetAI.getInReg()) 1812 RetAttrs.addAttribute(llvm::Attribute::InReg); 1813 break; 1814 case ABIArgInfo::Ignore: 1815 break; 1816 1817 case ABIArgInfo::InAlloca: 1818 case ABIArgInfo::Indirect: { 1819 // inalloca and sret disable readnone and readonly 1820 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1821 .removeAttribute(llvm::Attribute::ReadNone); 1822 break; 1823 } 1824 1825 case ABIArgInfo::CoerceAndExpand: 1826 break; 1827 1828 case ABIArgInfo::Expand: 1829 llvm_unreachable("Invalid ABI kind for return argument"); 1830 } 1831 1832 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { 1833 QualType PTy = RefTy->getPointeeType(); 1834 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 1835 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 1836 .getQuantity()); 1837 else if (getContext().getTargetAddressSpace(PTy) == 0) 1838 RetAttrs.addAttribute(llvm::Attribute::NonNull); 1839 } 1840 1841 // Attach return attributes. 1842 if (RetAttrs.hasAttributes()) { 1843 PAL.push_back(llvm::AttributeSet::get( 1844 getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs)); 1845 } 1846 1847 bool hasUsedSRet = false; 1848 1849 // Attach attributes to sret. 1850 if (IRFunctionArgs.hasSRetArg()) { 1851 llvm::AttrBuilder SRETAttrs; 1852 SRETAttrs.addAttribute(llvm::Attribute::StructRet); 1853 hasUsedSRet = true; 1854 if (RetAI.getInReg()) 1855 SRETAttrs.addAttribute(llvm::Attribute::InReg); 1856 PAL.push_back(llvm::AttributeSet::get( 1857 getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs)); 1858 } 1859 1860 // Attach attributes to inalloca argument. 
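// Note on the indices used with these AttributeSets: attributes on the
// return value use AttributeSet::ReturnIndex, function-level attributes use
// AttributeSet::FunctionIndex, and parameter attributes are 1-based, which
// is why the IR argument numbers below are offset by + 1.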
1861 if (IRFunctionArgs.hasInallocaArg()) { 1862 llvm::AttrBuilder Attrs; 1863 Attrs.addAttribute(llvm::Attribute::InAlloca); 1864 PAL.push_back(llvm::AttributeSet::get( 1865 getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs)); 1866 } 1867 1868 unsigned ArgNo = 0; 1869 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 1870 E = FI.arg_end(); 1871 I != E; ++I, ++ArgNo) { 1872 QualType ParamType = I->type; 1873 const ABIArgInfo &AI = I->info; 1874 llvm::AttrBuilder Attrs; 1875 1876 // Add attribute for padding argument, if necessary. 1877 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 1878 if (AI.getPaddingInReg()) 1879 PAL.push_back(llvm::AttributeSet::get( 1880 getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1, 1881 llvm::Attribute::InReg)); 1882 } 1883 1884 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 1885 // have the corresponding parameter variable. It doesn't make 1886 // sense to do it here because parameters are so messed up. 1887 switch (AI.getKind()) { 1888 case ABIArgInfo::Extend: 1889 if (ParamType->isSignedIntegerOrEnumerationType()) 1890 Attrs.addAttribute(llvm::Attribute::SExt); 1891 else if (ParamType->isUnsignedIntegerOrEnumerationType()) { 1892 if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType)) 1893 Attrs.addAttribute(llvm::Attribute::SExt); 1894 else 1895 Attrs.addAttribute(llvm::Attribute::ZExt); 1896 } 1897 // FALL THROUGH 1898 case ABIArgInfo::Direct: 1899 if (ArgNo == 0 && FI.isChainCall()) 1900 Attrs.addAttribute(llvm::Attribute::Nest); 1901 else if (AI.getInReg()) 1902 Attrs.addAttribute(llvm::Attribute::InReg); 1903 break; 1904 1905 case ABIArgInfo::Indirect: { 1906 if (AI.getInReg()) 1907 Attrs.addAttribute(llvm::Attribute::InReg); 1908 1909 if (AI.getIndirectByVal()) 1910 Attrs.addAttribute(llvm::Attribute::ByVal); 1911 1912 CharUnits Align = AI.getIndirectAlign(); 1913 1914 // In a byval argument, it is important that the required 1915 // alignment of the type is honored, as LLVM might be creating a 1916 // *new* stack object, and needs to know what alignment to give 1917 // it. (Sometimes it can deduce a sensible alignment on its own, 1918 // but not if clang decides it must emit a packed struct, or the 1919 // user specifies increased alignment requirements.) 1920 // 1921 // This is different from indirect *not* byval, where the object 1922 // exists already, and the align attribute is purely 1923 // informative. 1924 assert(!Align.isZero()); 1925 1926 // For now, only add this when we have a byval argument. 1927 // TODO: be less lazy about updating test cases. 1928 if (AI.getIndirectByVal()) 1929 Attrs.addAlignmentAttr(Align.getQuantity()); 1930 1931 // byval disables readnone and readonly. 1932 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1933 .removeAttribute(llvm::Attribute::ReadNone); 1934 break; 1935 } 1936 case ABIArgInfo::Ignore: 1937 case ABIArgInfo::Expand: 1938 case ABIArgInfo::CoerceAndExpand: 1939 break; 1940 1941 case ABIArgInfo::InAlloca: 1942 // inalloca disables readnone and readonly. 
1943 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1944 .removeAttribute(llvm::Attribute::ReadNone); 1945 continue; 1946 } 1947 1948 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { 1949 QualType PTy = RefTy->getPointeeType(); 1950 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 1951 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 1952 .getQuantity()); 1953 else if (getContext().getTargetAddressSpace(PTy) == 0) 1954 Attrs.addAttribute(llvm::Attribute::NonNull); 1955 } 1956 1957 switch (FI.getExtParameterInfo(ArgNo).getABI()) { 1958 case ParameterABI::Ordinary: 1959 break; 1960 1961 case ParameterABI::SwiftIndirectResult: { 1962 // Add 'sret' if we haven't already used it for something, but 1963 // only if the result is void. 1964 if (!hasUsedSRet && RetTy->isVoidType()) { 1965 Attrs.addAttribute(llvm::Attribute::StructRet); 1966 hasUsedSRet = true; 1967 } 1968 1969 // Add 'noalias' in either case. 1970 Attrs.addAttribute(llvm::Attribute::NoAlias); 1971 1972 // Add 'dereferenceable' and 'alignment'. 1973 auto PTy = ParamType->getPointeeType(); 1974 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { 1975 auto info = getContext().getTypeInfoInChars(PTy); 1976 Attrs.addDereferenceableAttr(info.first.getQuantity()); 1977 Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(), 1978 info.second.getQuantity())); 1979 } 1980 break; 1981 } 1982 1983 case ParameterABI::SwiftErrorResult: 1984 Attrs.addAttribute(llvm::Attribute::SwiftError); 1985 break; 1986 1987 case ParameterABI::SwiftContext: 1988 Attrs.addAttribute(llvm::Attribute::SwiftSelf); 1989 break; 1990 } 1991 1992 if (Attrs.hasAttributes()) { 1993 unsigned FirstIRArg, NumIRArgs; 1994 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1995 for (unsigned i = 0; i < NumIRArgs; i++) 1996 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), 1997 FirstIRArg + i + 1, Attrs)); 1998 } 1999 } 2000 assert(ArgNo == FI.arg_size()); 2001 2002 if (FuncAttrs.hasAttributes()) 2003 PAL.push_back(llvm:: 2004 AttributeSet::get(getLLVMContext(), 2005 llvm::AttributeSet::FunctionIndex, 2006 FuncAttrs)); 2007 } 2008 2009 /// An argument came in as a promoted argument; demote it back to its 2010 /// declared type. 2011 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 2012 const VarDecl *var, 2013 llvm::Value *value) { 2014 llvm::Type *varType = CGF.ConvertType(var->getType()); 2015 2016 // This can happen with promotions that actually don't change the 2017 // underlying type, like the enum promotions. 2018 if (value->getType() == varType) return value; 2019 2020 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 2021 && "unexpected promotion type"); 2022 2023 if (isa<llvm::IntegerType>(varType)) 2024 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 2025 2026 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 2027 } 2028 2029 /// Returns the attribute (either parameter attribute, or function 2030 /// attribute), which declares argument ArgNo to be non-null. 2031 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, 2032 QualType ArgType, unsigned ArgNo) { 2033 // FIXME: __attribute__((nonnull)) can also be applied to: 2034 // - references to pointers, where the pointee is known to be 2035 // nonnull (apparently a Clang extension) 2036 // - transparent unions containing pointers 2037 // In the former case, LLVM IR cannot represent the constraint. 
In 2038 // the latter case, we have no guarantee that the transparent union 2039 // is in fact passed as a pointer. 2040 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) 2041 return nullptr; 2042 // First, check attribute on parameter itself. 2043 if (PVD) { 2044 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) 2045 return ParmNNAttr; 2046 } 2047 // Check function attributes. 2048 if (!FD) 2049 return nullptr; 2050 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) { 2051 if (NNAttr->isNonNull(ArgNo)) 2052 return NNAttr; 2053 } 2054 return nullptr; 2055 } 2056 2057 namespace { 2058 struct CopyBackSwiftError final : EHScopeStack::Cleanup { 2059 Address Temp; 2060 Address Arg; 2061 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {} 2062 void Emit(CodeGenFunction &CGF, Flags flags) override { 2063 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp); 2064 CGF.Builder.CreateStore(errorValue, Arg); 2065 } 2066 }; 2067 } 2068 2069 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, 2070 llvm::Function *Fn, 2071 const FunctionArgList &Args) { 2072 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) 2073 // Naked functions don't have prologues. 2074 return; 2075 2076 // If this is an implicit-return-zero function, go ahead and 2077 // initialize the return value. TODO: it might be nice to have 2078 // a more general mechanism for this that didn't require synthesized 2079 // return statements. 2080 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) { 2081 if (FD->hasImplicitReturnZero()) { 2082 QualType RetTy = FD->getReturnType().getUnqualifiedType(); 2083 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy); 2084 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy); 2085 Builder.CreateStore(Zero, ReturnValue); 2086 } 2087 } 2088 2089 // FIXME: We no longer need the types from FunctionArgList; lift up and 2090 // simplify. 2091 2092 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI); 2093 // Flattened function arguments. 2094 SmallVector<llvm::Value *, 16> FnArgs; 2095 FnArgs.reserve(IRFunctionArgs.totalIRArgs()); 2096 for (auto &Arg : Fn->args()) { 2097 FnArgs.push_back(&Arg); 2098 } 2099 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs()); 2100 2101 // If we're using inalloca, all the memory arguments are GEPs off of the last 2102 // parameter, which is a pointer to the complete memory area. 2103 Address ArgStruct = Address::invalid(); 2104 const llvm::StructLayout *ArgStructLayout = nullptr; 2105 if (IRFunctionArgs.hasInallocaArg()) { 2106 ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct()); 2107 ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()], 2108 FI.getArgStructAlignment()); 2109 2110 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo()); 2111 } 2112 2113 // Name the struct return parameter. 2114 if (IRFunctionArgs.hasSRetArg()) { 2115 auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]); 2116 AI->setName("agg.result"); 2117 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1, 2118 llvm::Attribute::NoAlias)); 2119 } 2120 2121 // Track if we received the parameter as a pointer (indirect, byval, or 2122 // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it 2123 // into a local alloca for us. 2124 SmallVector<ParamValue, 16> ArgVals; 2125 ArgVals.reserve(Args.size()); 2126 2127 // Create a pointer value for every parameter declaration. 
This usually 2128 // entails copying one or more LLVM IR arguments into an alloca. Don't push 2129 // any cleanups or do anything that might unwind. We do that separately, so 2130 // we can push the cleanups in the correct order for the ABI. 2131 assert(FI.arg_size() == Args.size() && 2132 "Mismatch between function signature & arguments."); 2133 unsigned ArgNo = 0; 2134 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); 2135 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 2136 i != e; ++i, ++info_it, ++ArgNo) { 2137 const VarDecl *Arg = *i; 2138 QualType Ty = info_it->type; 2139 const ABIArgInfo &ArgI = info_it->info; 2140 2141 bool isPromoted = 2142 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted(); 2143 2144 unsigned FirstIRArg, NumIRArgs; 2145 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2146 2147 switch (ArgI.getKind()) { 2148 case ABIArgInfo::InAlloca: { 2149 assert(NumIRArgs == 0); 2150 auto FieldIndex = ArgI.getInAllocaFieldIndex(); 2151 CharUnits FieldOffset = 2152 CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex)); 2153 Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset, 2154 Arg->getName()); 2155 ArgVals.push_back(ParamValue::forIndirect(V)); 2156 break; 2157 } 2158 2159 case ABIArgInfo::Indirect: { 2160 assert(NumIRArgs == 1); 2161 Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign()); 2162 2163 if (!hasScalarEvaluationKind(Ty)) { 2164 // Aggregates and complex variables are accessed by reference. All we 2165 // need to do is realign the value, if requested. 2166 Address V = ParamAddr; 2167 if (ArgI.getIndirectRealign()) { 2168 Address AlignedTemp = CreateMemTemp(Ty, "coerce"); 2169 2170 // Copy from the incoming argument pointer to the temporary with the 2171 // appropriate alignment. 2172 // 2173 // FIXME: We should have a common utility for generating an aggregate 2174 // copy. 2175 CharUnits Size = getContext().getTypeSizeInChars(Ty); 2176 auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()); 2177 Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy); 2178 Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy); 2179 Builder.CreateMemCpy(Dst, Src, SizeVal, false); 2180 V = AlignedTemp; 2181 } 2182 ArgVals.push_back(ParamValue::forIndirect(V)); 2183 } else { 2184 // Load scalar value from indirect argument. 2185 llvm::Value *V = 2186 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart()); 2187 2188 if (isPromoted) 2189 V = emitArgumentDemotion(*this, Arg, V); 2190 ArgVals.push_back(ParamValue::forDirect(V)); 2191 } 2192 break; 2193 } 2194 2195 case ABIArgInfo::Extend: 2196 case ABIArgInfo::Direct: { 2197 2198 // If we have the trivial case, handle it with no muss and fuss. 
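// For example, a plain 'int' parameter is typically coerced to the same i32
// that ConvertType produces, with no offset, so the incoming IR argument can
// be used directly (modulo the attribute, swifterror and 'this'-adjustment
// handling below).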
2199 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 2200 ArgI.getCoerceToType() == ConvertType(Ty) && 2201 ArgI.getDirectOffset() == 0) { 2202 assert(NumIRArgs == 1); 2203 llvm::Value *V = FnArgs[FirstIRArg]; 2204 auto AI = cast<llvm::Argument>(V); 2205 2206 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) { 2207 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), 2208 PVD->getFunctionScopeIndex())) 2209 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2210 AI->getArgNo() + 1, 2211 llvm::Attribute::NonNull)); 2212 2213 QualType OTy = PVD->getOriginalType(); 2214 if (const auto *ArrTy = 2215 getContext().getAsConstantArrayType(OTy)) { 2216 // A C99 array parameter declaration with the static keyword also 2217 // indicates dereferenceability, and if the size is constant we can 2218 // use the dereferenceable attribute (which requires the size in 2219 // bytes). 2220 if (ArrTy->getSizeModifier() == ArrayType::Static) { 2221 QualType ETy = ArrTy->getElementType(); 2222 uint64_t ArrSize = ArrTy->getSize().getZExtValue(); 2223 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && 2224 ArrSize) { 2225 llvm::AttrBuilder Attrs; 2226 Attrs.addDereferenceableAttr( 2227 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize); 2228 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2229 AI->getArgNo() + 1, Attrs)); 2230 } else if (getContext().getTargetAddressSpace(ETy) == 0) { 2231 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2232 AI->getArgNo() + 1, 2233 llvm::Attribute::NonNull)); 2234 } 2235 } 2236 } else if (const auto *ArrTy = 2237 getContext().getAsVariableArrayType(OTy)) { 2238 // For C99 VLAs with the static keyword, we don't know the size so 2239 // we can't use the dereferenceable attribute, but in addrspace(0) 2240 // we know that it must be nonnull. 2241 if (ArrTy->getSizeModifier() == VariableArrayType::Static && 2242 !getContext().getTargetAddressSpace(ArrTy->getElementType())) 2243 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2244 AI->getArgNo() + 1, 2245 llvm::Attribute::NonNull)); 2246 } 2247 2248 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); 2249 if (!AVAttr) 2250 if (const auto *TOTy = dyn_cast<TypedefType>(OTy)) 2251 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); 2252 if (AVAttr) { 2253 llvm::Value *AlignmentValue = 2254 EmitScalarExpr(AVAttr->getAlignment()); 2255 llvm::ConstantInt *AlignmentCI = 2256 cast<llvm::ConstantInt>(AlignmentValue); 2257 unsigned Alignment = 2258 std::min((unsigned) AlignmentCI->getZExtValue(), 2259 +llvm::Value::MaximumAlignment); 2260 2261 llvm::AttrBuilder Attrs; 2262 Attrs.addAlignmentAttr(Alignment); 2263 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2264 AI->getArgNo() + 1, Attrs)); 2265 } 2266 } 2267 2268 if (Arg->getType().isRestrictQualified()) 2269 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2270 AI->getArgNo() + 1, 2271 llvm::Attribute::NoAlias)); 2272 2273 // LLVM expects swifterror parameters to be used in very restricted 2274 // ways. Copy the value into a less-restricted temporary. 
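// Sketch of what happens below for a swifterror parameter: the IR argument
// is a pointer to the error slot (e.g. an 'Error**'-style out-parameter); we
// load its current value into a local temporary, let the body address that
// temporary instead, and the cleanup pushed below copies the value back into
// the real slot on normal exits.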
2275 if (FI.getExtParameterInfo(ArgNo).getABI() 2276 == ParameterABI::SwiftErrorResult) { 2277 QualType pointeeTy = Ty->getPointeeType(); 2278 assert(pointeeTy->isPointerType()); 2279 Address temp = 2280 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 2281 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy)); 2282 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); 2283 Builder.CreateStore(incomingErrorValue, temp); 2284 V = temp.getPointer(); 2285 2286 // Push a cleanup to copy the value back at the end of the function. 2287 // The convention does not guarantee that the value will be written 2288 // back if the function exits with an unwind exception. 2289 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg); 2290 } 2291 2292 // Ensure the argument is the correct type. 2293 if (V->getType() != ArgI.getCoerceToType()) 2294 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 2295 2296 if (isPromoted) 2297 V = emitArgumentDemotion(*this, Arg, V); 2298 2299 if (const CXXMethodDecl *MD = 2300 dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) { 2301 if (MD->isVirtual() && Arg == CXXABIThisDecl) 2302 V = CGM.getCXXABI(). 2303 adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V); 2304 } 2305 2306 // Because of merging of function types from multiple decls it is 2307 // possible for the type of an argument to not match the corresponding 2308 // type in the function type. Since we are codegening the callee 2309 // in here, add a cast to the argument type. 2310 llvm::Type *LTy = ConvertType(Arg->getType()); 2311 if (V->getType() != LTy) 2312 V = Builder.CreateBitCast(V, LTy); 2313 2314 ArgVals.push_back(ParamValue::forDirect(V)); 2315 break; 2316 } 2317 2318 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), 2319 Arg->getName()); 2320 2321 // Pointer to store into. 2322 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI); 2323 2324 // Fast-isel and the optimizer generally like scalar values better than 2325 // FCAs, so we flatten them if this is safe to do for this argument. 2326 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 2327 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && 2328 STy->getNumElements() > 1) { 2329 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy); 2330 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 2331 llvm::Type *DstTy = Ptr.getElementType(); 2332 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 2333 2334 Address AddrToStoreInto = Address::invalid(); 2335 if (SrcSize <= DstSize) { 2336 AddrToStoreInto = 2337 Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy)); 2338 } else { 2339 AddrToStoreInto = 2340 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce"); 2341 } 2342 2343 assert(STy->getNumElements() == NumIRArgs); 2344 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2345 auto AI = FnArgs[FirstIRArg + i]; 2346 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 2347 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i)); 2348 Address EltPtr = 2349 Builder.CreateStructGEP(AddrToStoreInto, i, Offset); 2350 Builder.CreateStore(AI, EltPtr); 2351 } 2352 2353 if (SrcSize > DstSize) { 2354 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize); 2355 } 2356 2357 } else { 2358 // Simple case, just do a coerced store of the argument into the alloca. 
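// Rough illustration: if a two-field struct is passed as a single i64, the
// i64 IR argument is stored back into the struct's alloca here;
// CreateCoercedStore is expected to handle any type/size mismatch between
// the coerced value and the destination, falling back to going through a
// temporary when the source is larger.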
2359 assert(NumIRArgs == 1);
2360 auto AI = FnArgs[FirstIRArg];
2361 AI->setName(Arg->getName() + ".coerce");
2362 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2363 }
2364
2365 // Match to what EmitParmDecl is expecting for this type.
2366 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2367 llvm::Value *V =
2368 EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
2369 if (isPromoted)
2370 V = emitArgumentDemotion(*this, Arg, V);
2371 ArgVals.push_back(ParamValue::forDirect(V));
2372 } else {
2373 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2374 }
2375 break;
2376 }
2377
2378 case ABIArgInfo::CoerceAndExpand: {
2379 // Reconstruct into a temporary.
2380 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2381 ArgVals.push_back(ParamValue::forIndirect(alloca));
2382
2383 auto coercionType = ArgI.getCoerceAndExpandType();
2384 alloca = Builder.CreateElementBitCast(alloca, coercionType);
2385 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2386
2387 unsigned argIndex = FirstIRArg;
2388 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2389 llvm::Type *eltType = coercionType->getElementType(i);
2390 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2391 continue;
2392
2393 auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2394 auto elt = FnArgs[argIndex++];
2395 Builder.CreateStore(elt, eltAddr);
2396 }
2397 assert(argIndex == FirstIRArg + NumIRArgs);
2398 break;
2399 }
2400
2401 case ABIArgInfo::Expand: {
2402 // If this structure was expanded into multiple arguments then
2403 // we need to create a temporary and reconstruct it from the
2404 // arguments.
2405 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2406 LValue LV = MakeAddrLValue(Alloca, Ty);
2407 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2408
2409 auto FnArgIter = FnArgs.begin() + FirstIRArg;
2410 ExpandTypeFromArgs(Ty, LV, FnArgIter);
2411 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2412 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2413 auto AI = FnArgs[FirstIRArg + i];
2414 AI->setName(Arg->getName() + "." + Twine(i));
2415 }
2416 break;
2417 }
2418
2419 case ABIArgInfo::Ignore:
2420 assert(NumIRArgs == 0);
2421 // Initialize the local variable appropriately.
2422 if (!hasScalarEvaluationKind(Ty)) {
2423 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2424 } else {
2425 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2426 ArgVals.push_back(ParamValue::forDirect(U));
2427 }
2428 break;
2429 }
2430 }
2431
2432 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2433 for (int I = Args.size() - 1; I >= 0; --I)
2434 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2435 } else {
2436 for (unsigned I = 0, E = Args.size(); I != E; ++I)
2437 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2438 }
2439 }
2440
2441 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2442 while (insn->use_empty()) {
2443 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2444 if (!bitcast) return;
2445
2446 // This is "safe" because we would have used a ConstantExpr otherwise.
2447 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2448 bitcast->eraseFromParent();
2449 }
2450 }
2451
2452 /// Try to emit a fused autorelease of a return result.
2453 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2454 llvm::Value *result) {
2455 // We must be immediately followed by the cast.
2456 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock(); 2457 if (BB->empty()) return nullptr; 2458 if (&BB->back() != result) return nullptr; 2459 2460 llvm::Type *resultType = result->getType(); 2461 2462 // result is in a BasicBlock and is therefore an Instruction. 2463 llvm::Instruction *generator = cast<llvm::Instruction>(result); 2464 2465 SmallVector<llvm::Instruction*,4> insnsToKill; 2466 2467 // Look for: 2468 // %generator = bitcast %type1* %generator2 to %type2* 2469 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) { 2470 // We would have emitted this as a constant if the operand weren't 2471 // an Instruction. 2472 generator = cast<llvm::Instruction>(bitcast->getOperand(0)); 2473 2474 // Require the generator to be immediately followed by the cast. 2475 if (generator->getNextNode() != bitcast) 2476 return nullptr; 2477 2478 insnsToKill.push_back(bitcast); 2479 } 2480 2481 // Look for: 2482 // %generator = call i8* @objc_retain(i8* %originalResult) 2483 // or 2484 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult) 2485 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator); 2486 if (!call) return nullptr; 2487 2488 bool doRetainAutorelease; 2489 2490 if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) { 2491 doRetainAutorelease = true; 2492 } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints() 2493 .objc_retainAutoreleasedReturnValue) { 2494 doRetainAutorelease = false; 2495 2496 // If we emitted an assembly marker for this call (and the 2497 // ARCEntrypoints field should have been set if so), go looking 2498 // for that call. If we can't find it, we can't do this 2499 // optimization. But it should always be the immediately previous 2500 // instruction, unless we needed bitcasts around the call. 2501 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { 2502 llvm::Instruction *prev = call->getPrevNode(); 2503 assert(prev); 2504 if (isa<llvm::BitCastInst>(prev)) { 2505 prev = prev->getPrevNode(); 2506 assert(prev); 2507 } 2508 assert(isa<llvm::CallInst>(prev)); 2509 assert(cast<llvm::CallInst>(prev)->getCalledValue() == 2510 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); 2511 insnsToKill.push_back(prev); 2512 } 2513 } else { 2514 return nullptr; 2515 } 2516 2517 result = call->getArgOperand(0); 2518 insnsToKill.push_back(call); 2519 2520 // Keep killing bitcasts, for sanity. Note that we no longer care 2521 // about precise ordering as long as there's exactly one use. 2522 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 2523 if (!bitcast->hasOneUse()) break; 2524 insnsToKill.push_back(bitcast); 2525 result = bitcast->getOperand(0); 2526 } 2527 2528 // Delete all the unnecessary instructions, from latest to earliest. 2529 for (SmallVectorImpl<llvm::Instruction*>::iterator 2530 i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i) 2531 (*i)->eraseFromParent(); 2532 2533 // Do the fused retain/autorelease if we were asked to. 2534 if (doRetainAutorelease) 2535 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 2536 2537 // Cast back to the result type. 2538 return CGF.Builder.CreateBitCast(result, resultType); 2539 } 2540 2541 /// If this is a +1 of the value of an immutable 'self', remove it. 2542 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 2543 llvm::Value *result) { 2544 // This is only applicable to a method with an immutable 'self'. 
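// The pattern matched below looks roughly like (names illustrative):
//   %self = <ordinary load of the 'self' variable>
//   %result = call i8* @objc_retain(i8* %self)
// and, because 'self' is immutable here, the retain can be deleted and the
// loaded value returned directly.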
2545 const ObjCMethodDecl *method = 2546 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 2547 if (!method) return nullptr; 2548 const VarDecl *self = method->getSelfDecl(); 2549 if (!self->getType().isConstQualified()) return nullptr; 2550 2551 // Look for a retain call. 2552 llvm::CallInst *retainCall = 2553 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 2554 if (!retainCall || 2555 retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain) 2556 return nullptr; 2557 2558 // Look for an ordinary load of 'self'. 2559 llvm::Value *retainedValue = retainCall->getArgOperand(0); 2560 llvm::LoadInst *load = 2561 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 2562 if (!load || load->isAtomic() || load->isVolatile() || 2563 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) 2564 return nullptr; 2565 2566 // Okay! Burn it all down. This relies for correctness on the 2567 // assumption that the retain is emitted as part of the return and 2568 // that thereafter everything is used "linearly". 2569 llvm::Type *resultType = result->getType(); 2570 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 2571 assert(retainCall->use_empty()); 2572 retainCall->eraseFromParent(); 2573 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 2574 2575 return CGF.Builder.CreateBitCast(load, resultType); 2576 } 2577 2578 /// Emit an ARC autorelease of the result of a function. 2579 /// 2580 /// \return the value to actually return from the function 2581 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 2582 llvm::Value *result) { 2583 // If we're returning 'self', kill the initial retain. This is a 2584 // heuristic attempt to "encourage correctness" in the really unfortunate 2585 // case where we have a return of self during a dealloc and we desperately 2586 // need to avoid the possible autorelease. 2587 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result)) 2588 return self; 2589 2590 // At -O0, try to emit a fused retain/autorelease. 2591 if (CGF.shouldUseFusedARCCalls()) 2592 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result)) 2593 return fused; 2594 2595 return CGF.EmitARCAutoreleaseReturnValue(result); 2596 } 2597 2598 /// Heuristically search for a dominating store to the return-value slot. 2599 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { 2600 // Check if a User is a store which pointerOperand is the ReturnValue. 2601 // We are looking for stores to the ReturnValue, not for stores of the 2602 // ReturnValue to some other location. 2603 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * { 2604 auto *SI = dyn_cast<llvm::StoreInst>(U); 2605 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer()) 2606 return nullptr; 2607 // These aren't actually possible for non-coerced returns, and we 2608 // only care about non-coerced returns on this code path. 2609 assert(!SI->isAtomic() && !SI->isVolatile()); 2610 return SI; 2611 }; 2612 // If there are multiple uses of the return-value slot, just check 2613 // for something immediately preceding the IP. Sometimes this can 2614 // happen with how we generate implicit-returns; it can also happen 2615 // with noreturn cleanups. 
2616 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2617 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2618 if (IP->empty()) return nullptr;
2619 llvm::Instruction *I = &IP->back();
2620
2621 // Skip lifetime markers
2622 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2623 IE = IP->rend();
2624 II != IE; ++II) {
2625 if (llvm::IntrinsicInst *Intrinsic =
2626 dyn_cast<llvm::IntrinsicInst>(&*II)) {
2627 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2628 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2629 ++II;
2630 if (II == IE)
2631 break;
2632 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2633 continue;
2634 }
2635 }
2636 I = &*II;
2637 break;
2638 }
2639
2640 return GetStoreIfValid(I);
2641 }
2642
2643 llvm::StoreInst *store =
2644 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2645 if (!store) return nullptr;
2646
2647 // Now do a first-and-dirty dominance check: just walk up the
2648 // single-predecessors chain from the current insertion point.
2649 llvm::BasicBlock *StoreBB = store->getParent();
2650 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2651 while (IP != StoreBB) {
2652 if (!(IP = IP->getSinglePredecessor()))
2653 return nullptr;
2654 }
2655
2656 // Okay, the store's basic block dominates the insertion point; we
2657 // can do our thing.
2658 return store;
2659 }
2660
2661 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2662 bool EmitRetDbgLoc,
2663 SourceLocation EndLoc) {
2664 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2665 // Naked functions don't have epilogues.
2666 Builder.CreateUnreachable();
2667 return;
2668 }
2669
2670 // Functions with no result always return void.
2671 if (!ReturnValue.isValid()) {
2672 Builder.CreateRetVoid();
2673 return;
2674 }
2675
2676 llvm::DebugLoc RetDbgLoc;
2677 llvm::Value *RV = nullptr;
2678 QualType RetTy = FI.getReturnType();
2679 const ABIArgInfo &RetAI = FI.getReturnInfo();
2680
2681 switch (RetAI.getKind()) {
2682 case ABIArgInfo::InAlloca:
2683 // Aggregates get evaluated directly into the destination. Sometimes we
2684 // need to return the sret value in a register, though.
2685 assert(hasAggregateEvaluationKind(RetTy));
2686 if (RetAI.getInAllocaSRet()) {
2687 llvm::Function::arg_iterator EI = CurFn->arg_end();
2688 --EI;
2689 llvm::Value *ArgStruct = &*EI;
2690 llvm::Value *SRet = Builder.CreateStructGEP(
2691 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2692 RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2693 }
2694 break;
2695
2696 case ABIArgInfo::Indirect: {
2697 auto AI = CurFn->arg_begin();
2698 if (RetAI.isSRetAfterThis())
2699 ++AI;
2700 switch (getEvaluationKind(RetTy)) {
2701 case TEK_Complex: {
2702 ComplexPairTy RT =
2703 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2704 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2705 /*isInit*/ true);
2706 break;
2707 }
2708 case TEK_Aggregate:
2709 // Do nothing; aggregates get evaluated directly into the destination.
2710 break;
2711 case TEK_Scalar:
2712 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2713 MakeNaturalAlignAddrLValue(&*AI, RetTy),
2714 /*isInit*/ true);
2715 break;
2716 }
2717 break;
2718 }
2719
2720 case ABIArgInfo::Extend:
2721 case ABIArgInfo::Direct:
2722 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2723 RetAI.getDirectOffset() == 0) {
2724 // The internal return value temp will always have pointer-to-return-type
2725 // type, just do a load.
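// For example, 'return x;' has already emitted something like
//   store i32 %x, i32* %retval
// and the generic path would now re-load %retval; if that store is found to
// dominate the return, we can return %x directly instead (see below).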
2726 2727 // If there is a dominating store to ReturnValue, we can elide 2728 // the load, zap the store, and usually zap the alloca. 2729 if (llvm::StoreInst *SI = 2730 findDominatingStoreToReturnValue(*this)) { 2731 // Reuse the debug location from the store unless there is 2732 // cleanup code to be emitted between the store and return 2733 // instruction. 2734 if (EmitRetDbgLoc && !AutoreleaseResult) 2735 RetDbgLoc = SI->getDebugLoc(); 2736 // Get the stored value and nuke the now-dead store. 2737 RV = SI->getValueOperand(); 2738 SI->eraseFromParent(); 2739 2740 // If that was the only use of the return value, nuke it as well now. 2741 auto returnValueInst = ReturnValue.getPointer(); 2742 if (returnValueInst->use_empty()) { 2743 if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) { 2744 alloca->eraseFromParent(); 2745 ReturnValue = Address::invalid(); 2746 } 2747 } 2748 2749 // Otherwise, we have to do a simple load. 2750 } else { 2751 RV = Builder.CreateLoad(ReturnValue); 2752 } 2753 } else { 2754 // If the value is offset in memory, apply the offset now. 2755 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI); 2756 2757 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this); 2758 } 2759 2760 // In ARC, end functions that return a retainable type with a call 2761 // to objc_autoreleaseReturnValue. 2762 if (AutoreleaseResult) { 2763 #ifndef NDEBUG 2764 // Type::isObjCRetainabletype has to be called on a QualType that hasn't 2765 // been stripped of the typedefs, so we cannot use RetTy here. Get the 2766 // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from 2767 // CurCodeDecl or BlockInfo. 2768 QualType RT; 2769 2770 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl)) 2771 RT = FD->getReturnType(); 2772 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl)) 2773 RT = MD->getReturnType(); 2774 else if (isa<BlockDecl>(CurCodeDecl)) 2775 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType(); 2776 else 2777 llvm_unreachable("Unexpected function/method type"); 2778 2779 assert(getLangOpts().ObjCAutoRefCount && 2780 !FI.isReturnsRetained() && 2781 RT->isObjCRetainableType()); 2782 #endif 2783 RV = emitAutoreleaseOfResult(*this, RV); 2784 } 2785 2786 break; 2787 2788 case ABIArgInfo::Ignore: 2789 break; 2790 2791 case ABIArgInfo::CoerceAndExpand: { 2792 auto coercionType = RetAI.getCoerceAndExpandType(); 2793 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 2794 2795 // Load all of the coerced elements out into results. 2796 llvm::SmallVector<llvm::Value*, 4> results; 2797 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType); 2798 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 2799 auto coercedEltType = coercionType->getElementType(i); 2800 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType)) 2801 continue; 2802 2803 auto eltAddr = Builder.CreateStructGEP(addr, i, layout); 2804 auto elt = Builder.CreateLoad(eltAddr); 2805 results.push_back(elt); 2806 } 2807 2808 // If we have one result, it's the single direct result type. 2809 if (results.size() == 1) { 2810 RV = results[0]; 2811 2812 // Otherwise, we need to make a first-class aggregate. 2813 } else { 2814 // Construct a return type that lacks padding elements. 
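// Illustrative case (element types are just an example): for a coercion
// type of { i64, [4 x i8], i32 } where the [4 x i8] element is padding,
// 'results' holds the i64 and the i32, and the value is rebuilt below as
// the unpadded type { i64, i32 } via insertvalue.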
2815 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType(); 2816 2817 RV = llvm::UndefValue::get(returnType); 2818 for (unsigned i = 0, e = results.size(); i != e; ++i) { 2819 RV = Builder.CreateInsertValue(RV, results[i], i); 2820 } 2821 } 2822 break; 2823 } 2824 2825 case ABIArgInfo::Expand: 2826 llvm_unreachable("Invalid ABI kind for return argument"); 2827 } 2828 2829 llvm::Instruction *Ret; 2830 if (RV) { 2831 if (CurCodeDecl && SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) { 2832 if (auto RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>()) { 2833 SanitizerScope SanScope(this); 2834 llvm::Value *Cond = Builder.CreateICmpNE( 2835 RV, llvm::Constant::getNullValue(RV->getType())); 2836 llvm::Constant *StaticData[] = { 2837 EmitCheckSourceLocation(EndLoc), 2838 EmitCheckSourceLocation(RetNNAttr->getLocation()), 2839 }; 2840 EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute), 2841 "nonnull_return", StaticData, None); 2842 } 2843 } 2844 Ret = Builder.CreateRet(RV); 2845 } else { 2846 Ret = Builder.CreateRetVoid(); 2847 } 2848 2849 if (RetDbgLoc) 2850 Ret->setDebugLoc(std::move(RetDbgLoc)); 2851 } 2852 2853 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 2854 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 2855 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 2856 } 2857 2858 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, 2859 QualType Ty) { 2860 // FIXME: Generate IR in one pass, rather than going back and fixing up these 2861 // placeholders. 2862 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 2863 llvm::Value *Placeholder = 2864 llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo()); 2865 Placeholder = CGF.Builder.CreateDefaultAlignedLoad(Placeholder); 2866 2867 // FIXME: When we generate this IR in one pass, we shouldn't need 2868 // this win32-specific alignment hack. 2869 CharUnits Align = CharUnits::fromQuantity(4); 2870 2871 return AggValueSlot::forAddr(Address(Placeholder, Align), 2872 Ty.getQualifiers(), 2873 AggValueSlot::IsNotDestructed, 2874 AggValueSlot::DoesNotNeedGCBarriers, 2875 AggValueSlot::IsNotAliased); 2876 } 2877 2878 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 2879 const VarDecl *param, 2880 SourceLocation loc) { 2881 // StartFunction converted the ABI-lowered parameter(s) into a 2882 // local alloca. We need to turn that into an r-value suitable 2883 // for EmitCall. 2884 Address local = GetAddrOfLocalVar(param); 2885 2886 QualType type = param->getType(); 2887 2888 assert(!isInAllocaArgument(CGM.getCXXABI(), type) && 2889 "cannot emit delegate call arguments for inalloca arguments!"); 2890 2891 // For the most part, we just need to load the alloca, except that 2892 // aggregate r-values are actually pointers to temporaries. 2893 if (type->isReferenceType()) 2894 args.add(RValue::get(Builder.CreateLoad(local)), type); 2895 else 2896 args.add(convertTempToRValue(local, type, loc), type); 2897 } 2898 2899 static bool isProvablyNull(llvm::Value *addr) { 2900 return isa<llvm::ConstantPointerNull>(addr); 2901 } 2902 2903 static bool isProvablyNonNull(llvm::Value *addr) { 2904 return isa<llvm::AllocaInst>(addr); 2905 } 2906 2907 /// Emit the actual writing-back of a writeback. 
2908 static void emitWriteback(CodeGenFunction &CGF, 2909 const CallArgList::Writeback &writeback) { 2910 const LValue &srcLV = writeback.Source; 2911 Address srcAddr = srcLV.getAddress(); 2912 assert(!isProvablyNull(srcAddr.getPointer()) && 2913 "shouldn't have writeback for provably null argument"); 2914 2915 llvm::BasicBlock *contBB = nullptr; 2916 2917 // If the argument wasn't provably non-null, we need to null check 2918 // before doing the store. 2919 bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer()); 2920 if (!provablyNonNull) { 2921 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 2922 contBB = CGF.createBasicBlock("icr.done"); 2923 2924 llvm::Value *isNull = 2925 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 2926 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 2927 CGF.EmitBlock(writebackBB); 2928 } 2929 2930 // Load the value to writeback. 2931 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 2932 2933 // Cast it back, in case we're writing an id to a Foo* or something. 2934 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(), 2935 "icr.writeback-cast"); 2936 2937 // Perform the writeback. 2938 2939 // If we have a "to use" value, it's something we need to emit a use 2940 // of. This has to be carefully threaded in: if it's done after the 2941 // release it's potentially undefined behavior (and the optimizer 2942 // will ignore it), and if it happens before the retain then the 2943 // optimizer could move the release there. 2944 if (writeback.ToUse) { 2945 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 2946 2947 // Retain the new value. No need to block-copy here: the block's 2948 // being passed up the stack. 2949 value = CGF.EmitARCRetainNonBlock(value); 2950 2951 // Emit the intrinsic use here. 2952 CGF.EmitARCIntrinsicUse(writeback.ToUse); 2953 2954 // Load the old value (primitively). 2955 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 2956 2957 // Put the new value in place (primitively). 2958 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 2959 2960 // Release the old value. 2961 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 2962 2963 // Otherwise, we can just do a normal lvalue store. 2964 } else { 2965 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 2966 } 2967 2968 // Jump to the continuation block. 2969 if (!provablyNonNull) 2970 CGF.EmitBlock(contBB); 2971 } 2972 2973 static void emitWritebacks(CodeGenFunction &CGF, 2974 const CallArgList &args) { 2975 for (const auto &I : args.writebacks()) 2976 emitWriteback(CGF, I); 2977 } 2978 2979 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, 2980 const CallArgList &CallArgs) { 2981 assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()); 2982 ArrayRef<CallArgList::CallArgCleanup> Cleanups = 2983 CallArgs.getCleanupsToDeactivate(); 2984 // Iterate in reverse to increase the likelihood of popping the cleanup. 2985 for (const auto &I : llvm::reverse(Cleanups)) { 2986 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP); 2987 I.IsActiveIP->eraseFromParent(); 2988 } 2989 } 2990 2991 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { 2992 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) 2993 if (uop->getOpcode() == UO_AddrOf) 2994 return uop->getSubExpr(); 2995 return nullptr; 2996 } 2997 2998 /// Emit an argument that's being passed call-by-writeback. 
That is, 2999 /// we are passing the address of an __autoreleased temporary; it 3000 /// might be copy-initialized with the current value of the given 3001 /// address, but it will definitely be copied out of after the call. 3002 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, 3003 const ObjCIndirectCopyRestoreExpr *CRE) { 3004 LValue srcLV; 3005 3006 // Make an optimistic effort to emit the address as an l-value. 3007 // This can fail if the argument expression is more complicated. 3008 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { 3009 srcLV = CGF.EmitLValue(lvExpr); 3010 3011 // Otherwise, just emit it as a scalar. 3012 } else { 3013 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); 3014 3015 QualType srcAddrType = 3016 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); 3017 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); 3018 } 3019 Address srcAddr = srcLV.getAddress(); 3020 3021 // The dest and src types don't necessarily match in LLVM terms 3022 // because of the crazy ObjC compatibility rules. 3023 3024 llvm::PointerType *destType = 3025 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); 3026 3027 // If the address is a constant null, just pass the appropriate null. 3028 if (isProvablyNull(srcAddr.getPointer())) { 3029 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), 3030 CRE->getType()); 3031 return; 3032 } 3033 3034 // Create the temporary. 3035 Address temp = CGF.CreateTempAlloca(destType->getElementType(), 3036 CGF.getPointerAlign(), 3037 "icr.temp"); 3038 // Loading an l-value can introduce a cleanup if the l-value is __weak, 3039 // and that cleanup will be conditional if we can't prove that the l-value 3040 // isn't null, so we need to register a dominating point so that the cleanups 3041 // system will make valid IR. 3042 CodeGenFunction::ConditionalEvaluation condEval(CGF); 3043 3044 // Zero-initialize it if we're not doing a copy-initialization. 3045 bool shouldCopy = CRE->shouldCopy(); 3046 if (!shouldCopy) { 3047 llvm::Value *null = 3048 llvm::ConstantPointerNull::get( 3049 cast<llvm::PointerType>(destType->getElementType())); 3050 CGF.Builder.CreateStore(null, temp); 3051 } 3052 3053 llvm::BasicBlock *contBB = nullptr; 3054 llvm::BasicBlock *originBB = nullptr; 3055 3056 // If the address is *not* known to be non-null, we need to switch. 3057 llvm::Value *finalArgument; 3058 3059 bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer()); 3060 if (provablyNonNull) { 3061 finalArgument = temp.getPointer(); 3062 } else { 3063 llvm::Value *isNull = 3064 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3065 3066 finalArgument = CGF.Builder.CreateSelect(isNull, 3067 llvm::ConstantPointerNull::get(destType), 3068 temp.getPointer(), "icr.argument"); 3069 3070 // If we need to copy, then the load has to be conditional, which 3071 // means we need control flow. 3072 if (shouldCopy) { 3073 originBB = CGF.Builder.GetInsertBlock(); 3074 contBB = CGF.createBasicBlock("icr.cont"); 3075 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 3076 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 3077 CGF.EmitBlock(copyBB); 3078 condEval.begin(CGF); 3079 } 3080 } 3081 3082 llvm::Value *valueToUse = nullptr; 3083 3084 // Perform a copy if necessary. 
3085 if (shouldCopy) { 3086 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 3087 assert(srcRV.isScalar()); 3088 3089 llvm::Value *src = srcRV.getScalarVal(); 3090 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 3091 "icr.cast"); 3092 3093 // Use an ordinary store, not a store-to-lvalue. 3094 CGF.Builder.CreateStore(src, temp); 3095 3096 // If optimization is enabled, and the value was held in a 3097 // __strong variable, we need to tell the optimizer that this 3098 // value has to stay alive until we're doing the store back. 3099 // This is because the temporary is effectively unretained, 3100 // and so otherwise we can violate the high-level semantics. 3101 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 3102 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 3103 valueToUse = src; 3104 } 3105 } 3106 3107 // Finish the control flow if we needed it. 3108 if (shouldCopy && !provablyNonNull) { 3109 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 3110 CGF.EmitBlock(contBB); 3111 3112 // Make a phi for the value to intrinsically use. 3113 if (valueToUse) { 3114 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 3115 "icr.to-use"); 3116 phiToUse->addIncoming(valueToUse, copyBB); 3117 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 3118 originBB); 3119 valueToUse = phiToUse; 3120 } 3121 3122 condEval.end(CGF); 3123 } 3124 3125 args.addWriteback(srcLV, temp, valueToUse); 3126 args.add(RValue::get(finalArgument), CRE->getType()); 3127 } 3128 3129 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 3130 assert(!StackBase && !StackCleanup.isValid()); 3131 3132 // Save the stack. 3133 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 3134 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); 3135 } 3136 3137 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 3138 if (StackBase) { 3139 // Restore the stack after the call. 3140 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 3141 CGF.Builder.CreateCall(F, StackBase); 3142 } 3143 } 3144 3145 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, 3146 SourceLocation ArgLoc, 3147 const FunctionDecl *FD, 3148 unsigned ParmNum) { 3149 if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD) 3150 return; 3151 auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr; 3152 unsigned ArgNo = PVD ? 
PVD->getFunctionScopeIndex() : ParmNum; 3153 auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo); 3154 if (!NNAttr) 3155 return; 3156 SanitizerScope SanScope(this); 3157 assert(RV.isScalar()); 3158 llvm::Value *V = RV.getScalarVal(); 3159 llvm::Value *Cond = 3160 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType())); 3161 llvm::Constant *StaticData[] = { 3162 EmitCheckSourceLocation(ArgLoc), 3163 EmitCheckSourceLocation(NNAttr->getLocation()), 3164 llvm::ConstantInt::get(Int32Ty, ArgNo + 1), 3165 }; 3166 EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute), 3167 "nonnull_arg", StaticData, None); 3168 } 3169 3170 void CodeGenFunction::EmitCallArgs( 3171 CallArgList &Args, ArrayRef<QualType> ArgTypes, 3172 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, 3173 const FunctionDecl *CalleeDecl, unsigned ParamsToSkip) { 3174 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); 3175 3176 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) { 3177 if (CalleeDecl == nullptr || I >= CalleeDecl->getNumParams()) 3178 return; 3179 auto *PS = CalleeDecl->getParamDecl(I)->getAttr<PassObjectSizeAttr>(); 3180 if (PS == nullptr) 3181 return; 3182 3183 const auto &Context = getContext(); 3184 auto SizeTy = Context.getSizeType(); 3185 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); 3186 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T); 3187 Args.add(RValue::get(V), SizeTy); 3188 }; 3189 3190 // We *have* to evaluate arguments from right to left in the MS C++ ABI, 3191 // because arguments are destroyed left to right in the callee. 3192 if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 3193 // Insert a stack save if we're going to need any inalloca args. 3194 bool HasInAllocaArgs = false; 3195 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end(); 3196 I != E && !HasInAllocaArgs; ++I) 3197 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I); 3198 if (HasInAllocaArgs) { 3199 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 3200 Args.allocateArgumentMemory(*this); 3201 } 3202 3203 // Evaluate each argument. 3204 size_t CallArgsStart = Args.size(); 3205 for (int I = ArgTypes.size() - 1; I >= 0; --I) { 3206 CallExpr::const_arg_iterator Arg = ArgRange.begin() + I; 3207 MaybeEmitImplicitObjectSize(I, *Arg); 3208 EmitCallArg(Args, *Arg, ArgTypes[I]); 3209 EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(), 3210 CalleeDecl, ParamsToSkip + I); 3211 } 3212 3213 // Un-reverse the arguments we just evaluated so they match up with the LLVM 3214 // IR function. 
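    // For example (illustrative): for a call f(a, b, c) under this ABI the
    // loop above evaluated and appended the arguments as c, b, a; reversing
    // the entries added since CallArgsStart restores the order a, b, c that
    // the LLVM IR signature expects.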
3215 std::reverse(Args.begin() + CallArgsStart, Args.end()); 3216 return; 3217 } 3218 3219 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { 3220 CallExpr::const_arg_iterator Arg = ArgRange.begin() + I; 3221 assert(Arg != ArgRange.end()); 3222 EmitCallArg(Args, *Arg, ArgTypes[I]); 3223 EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(), 3224 CalleeDecl, ParamsToSkip + I); 3225 MaybeEmitImplicitObjectSize(I, *Arg); 3226 } 3227 } 3228 3229 namespace { 3230 3231 struct DestroyUnpassedArg final : EHScopeStack::Cleanup { 3232 DestroyUnpassedArg(Address Addr, QualType Ty) 3233 : Addr(Addr), Ty(Ty) {} 3234 3235 Address Addr; 3236 QualType Ty; 3237 3238 void Emit(CodeGenFunction &CGF, Flags flags) override { 3239 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 3240 assert(!Dtor->isTrivial()); 3241 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 3242 /*Delegating=*/false, Addr); 3243 } 3244 }; 3245 3246 struct DisableDebugLocationUpdates { 3247 CodeGenFunction &CGF; 3248 bool disabledDebugInfo; 3249 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { 3250 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) 3251 CGF.disableDebugInfo(); 3252 } 3253 ~DisableDebugLocationUpdates() { 3254 if (disabledDebugInfo) 3255 CGF.enableDebugInfo(); 3256 } 3257 }; 3258 3259 } // end anonymous namespace 3260 3261 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 3262 QualType type) { 3263 DisableDebugLocationUpdates Dis(*this, E); 3264 if (const ObjCIndirectCopyRestoreExpr *CRE 3265 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 3266 assert(getLangOpts().ObjCAutoRefCount); 3267 assert(getContext().hasSameType(E->getType(), type)); 3268 return emitWritebackArg(*this, args, CRE); 3269 } 3270 3271 assert(type->isReferenceType() == E->isGLValue() && 3272 "reference binding to unmaterialized r-value!"); 3273 3274 if (E->isGLValue()) { 3275 assert(E->getObjectKind() == OK_Ordinary); 3276 return args.add(EmitReferenceBindingToExpr(E), type); 3277 } 3278 3279 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 3280 3281 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 3282 // However, we still have to push an EH-only cleanup in case we unwind before 3283 // we make it to the call. 3284 if (HasAggregateEvalKind && 3285 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 3286 // If we're using inalloca, use the argument memory. Otherwise, use a 3287 // temporary. 3288 AggValueSlot Slot; 3289 if (args.isUsingInAlloca()) 3290 Slot = createPlaceholderSlot(*this, type); 3291 else 3292 Slot = CreateAggTemp(type, "agg.tmp"); 3293 3294 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 3295 bool DestroyedInCallee = 3296 RD && RD->hasNonTrivialDestructor() && 3297 CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default; 3298 if (DestroyedInCallee) 3299 Slot.setExternallyDestructed(); 3300 3301 EmitAggExpr(E, Slot); 3302 RValue RV = Slot.asRValue(); 3303 args.add(RV, type); 3304 3305 if (DestroyedInCallee) { 3306 // Create a no-op GEP between the placeholder and the cleanup so we can 3307 // RAUW it successfully. It also serves as a marker of the first 3308 // instruction where the cleanup is active. 3309 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(), 3310 type); 3311 // This unreachable is a temporary marker which will be removed later. 
3312 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 3313 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 3314 } 3315 return; 3316 } 3317 3318 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 3319 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 3320 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 3321 assert(L.isSimple()); 3322 if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) { 3323 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true); 3324 } else { 3325 // We can't represent a misaligned lvalue in the CallArgList, so copy 3326 // to an aligned temporary now. 3327 Address tmp = CreateMemTemp(type); 3328 EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile()); 3329 args.add(RValue::getAggregate(tmp), type); 3330 } 3331 return; 3332 } 3333 3334 args.add(EmitAnyExprToTemp(E), type); 3335 } 3336 3337 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 3338 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 3339 // implicitly widens null pointer constants that are arguments to varargs 3340 // functions to pointer-sized ints. 3341 if (!getTarget().getTriple().isOSWindows()) 3342 return Arg->getType(); 3343 3344 if (Arg->getType()->isIntegerType() && 3345 getContext().getTypeSize(Arg->getType()) < 3346 getContext().getTargetInfo().getPointerWidth(0) && 3347 Arg->isNullPointerConstant(getContext(), 3348 Expr::NPC_ValueDependentIsNotNull)) { 3349 return getContext().getIntPtrType(); 3350 } 3351 3352 return Arg->getType(); 3353 } 3354 3355 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3356 // optimizer it can aggressively ignore unwind edges. 3357 void 3358 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 3359 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 3360 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 3361 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 3362 CGM.getNoObjCARCExceptionsMetadata()); 3363 } 3364 3365 /// Emits a call to the given no-arguments nounwind runtime function. 3366 llvm::CallInst * 3367 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 3368 const llvm::Twine &name) { 3369 return EmitNounwindRuntimeCall(callee, None, name); 3370 } 3371 3372 /// Emits a call to the given nounwind runtime function. 3373 llvm::CallInst * 3374 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 3375 ArrayRef<llvm::Value*> args, 3376 const llvm::Twine &name) { 3377 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 3378 call->setDoesNotThrow(); 3379 return call; 3380 } 3381 3382 /// Emits a simple call (never an invoke) to the given no-arguments 3383 /// runtime function. 3384 llvm::CallInst * 3385 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 3386 const llvm::Twine &name) { 3387 return EmitRuntimeCall(callee, None, name); 3388 } 3389 3390 // Calls which may throw must have operand bundles indicating which funclet 3391 // they are nested within. 3392 static void 3393 getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad, 3394 SmallVectorImpl<llvm::OperandBundleDef> &BundleList) { 3395 // There is no need for a funclet operand bundle if we aren't inside a 3396 // funclet. 3397 if (!CurrentFuncletPad) 3398 return; 3399 3400 // Skip intrinsics which cannot throw. 
3401 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts()); 3402 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) 3403 return; 3404 3405 BundleList.emplace_back("funclet", CurrentFuncletPad); 3406 } 3407 3408 /// Emits a simple call (never an invoke) to the given runtime function. 3409 llvm::CallInst * 3410 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 3411 ArrayRef<llvm::Value*> args, 3412 const llvm::Twine &name) { 3413 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3414 getBundlesForFunclet(callee, CurrentFuncletPad, BundleList); 3415 3416 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList, name); 3417 call->setCallingConv(getRuntimeCC()); 3418 return call; 3419 } 3420 3421 /// Emits a call or invoke to the given noreturn runtime function. 3422 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, 3423 ArrayRef<llvm::Value*> args) { 3424 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3425 getBundlesForFunclet(callee, CurrentFuncletPad, BundleList); 3426 3427 if (getInvokeDest()) { 3428 llvm::InvokeInst *invoke = 3429 Builder.CreateInvoke(callee, 3430 getUnreachableBlock(), 3431 getInvokeDest(), 3432 args, 3433 BundleList); 3434 invoke->setDoesNotReturn(); 3435 invoke->setCallingConv(getRuntimeCC()); 3436 } else { 3437 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); 3438 call->setDoesNotReturn(); 3439 call->setCallingConv(getRuntimeCC()); 3440 Builder.CreateUnreachable(); 3441 } 3442 } 3443 3444 /// Emits a call or invoke instruction to the given nullary runtime function. 3445 llvm::CallSite 3446 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 3447 const Twine &name) { 3448 return EmitRuntimeCallOrInvoke(callee, None, name); 3449 } 3450 3451 /// Emits a call or invoke instruction to the given runtime function. 3452 llvm::CallSite 3453 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 3454 ArrayRef<llvm::Value*> args, 3455 const Twine &name) { 3456 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name); 3457 callSite.setCallingConv(getRuntimeCC()); 3458 return callSite; 3459 } 3460 3461 /// Emits a call or invoke instruction to the given function, depending 3462 /// on the current state of the EH stack. 3463 llvm::CallSite 3464 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 3465 ArrayRef<llvm::Value *> Args, 3466 const Twine &Name) { 3467 llvm::BasicBlock *InvokeDest = getInvokeDest(); 3468 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3469 getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList); 3470 3471 llvm::Instruction *Inst; 3472 if (!InvokeDest) 3473 Inst = Builder.CreateCall(Callee, Args, BundleList, Name); 3474 else { 3475 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 3476 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList, 3477 Name); 3478 EmitBlock(ContBB); 3479 } 3480 3481 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3482 // optimizer it can aggressively ignore unwind edges. 3483 if (CGM.getLangOpts().ObjCAutoRefCount) 3484 AddObjCARCExceptionMetadata(Inst); 3485 3486 return llvm::CallSite(Inst); 3487 } 3488 3489 /// \brief Store a non-aggregate value to an address to initialize it. For 3490 /// initialization, a non-atomic store will be used. 
3491 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, 3492 LValue Dst) { 3493 if (Src.isScalar()) 3494 CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true); 3495 else 3496 CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true); 3497 } 3498 3499 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, 3500 llvm::Value *New) { 3501 DeferredReplacements.push_back(std::make_pair(Old, New)); 3502 } 3503 3504 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 3505 llvm::Value *Callee, 3506 ReturnValueSlot ReturnValue, 3507 const CallArgList &CallArgs, 3508 CGCalleeInfo CalleeInfo, 3509 llvm::Instruction **callOrInvoke) { 3510 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 3511 3512 // Handle struct-return functions by passing a pointer to the 3513 // location that we would like to return into. 3514 QualType RetTy = CallInfo.getReturnType(); 3515 const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); 3516 3517 llvm::FunctionType *IRFuncTy = 3518 cast<llvm::FunctionType>( 3519 cast<llvm::PointerType>(Callee->getType())->getElementType()); 3520 3521 // If we're using inalloca, insert the allocation after the stack save. 3522 // FIXME: Do this earlier rather than hacking it in here! 3523 Address ArgMemory = Address::invalid(); 3524 const llvm::StructLayout *ArgMemoryLayout = nullptr; 3525 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { 3526 ArgMemoryLayout = CGM.getDataLayout().getStructLayout(ArgStruct); 3527 llvm::Instruction *IP = CallArgs.getStackBase(); 3528 llvm::AllocaInst *AI; 3529 if (IP) { 3530 IP = IP->getNextNode(); 3531 AI = new llvm::AllocaInst(ArgStruct, "argmem", IP); 3532 } else { 3533 AI = CreateTempAlloca(ArgStruct, "argmem"); 3534 } 3535 auto Align = CallInfo.getArgStructAlignment(); 3536 AI->setAlignment(Align.getQuantity()); 3537 AI->setUsedWithInAlloca(true); 3538 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); 3539 ArgMemory = Address(AI, Align); 3540 } 3541 3542 // Helper function to drill into the inalloca allocation. 3543 auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address { 3544 auto FieldOffset = 3545 CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex)); 3546 return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset); 3547 }; 3548 3549 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo); 3550 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs()); 3551 3552 // If the call returns a temporary with struct return, create a temporary 3553 // alloca to hold the result, unless one is given to us. 
3554 Address SRetPtr = Address::invalid(); 3555 size_t UnusedReturnSize = 0; 3556 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) { 3557 if (!ReturnValue.isNull()) { 3558 SRetPtr = ReturnValue.getValue(); 3559 } else { 3560 SRetPtr = CreateMemTemp(RetTy); 3561 if (HaveInsertPoint() && ReturnValue.isUnused()) { 3562 uint64_t size = 3563 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy)); 3564 if (EmitLifetimeStart(size, SRetPtr.getPointer())) 3565 UnusedReturnSize = size; 3566 } 3567 } 3568 if (IRFunctionArgs.hasSRetArg()) { 3569 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer(); 3570 } else if (RetAI.isInAlloca()) { 3571 Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex()); 3572 Builder.CreateStore(SRetPtr.getPointer(), Addr); 3573 } 3574 } 3575 3576 Address swiftErrorTemp = Address::invalid(); 3577 Address swiftErrorArg = Address::invalid(); 3578 3579 assert(CallInfo.arg_size() == CallArgs.size() && 3580 "Mismatch between function signature & arguments."); 3581 unsigned ArgNo = 0; 3582 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); 3583 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); 3584 I != E; ++I, ++info_it, ++ArgNo) { 3585 const ABIArgInfo &ArgInfo = info_it->info; 3586 RValue RV = I->RV; 3587 3588 // Insert a padding argument to ensure proper alignment. 3589 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 3590 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 3591 llvm::UndefValue::get(ArgInfo.getPaddingType()); 3592 3593 unsigned FirstIRArg, NumIRArgs; 3594 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 3595 3596 switch (ArgInfo.getKind()) { 3597 case ABIArgInfo::InAlloca: { 3598 assert(NumIRArgs == 0); 3599 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 3600 if (RV.isAggregate()) { 3601 // Replace the placeholder with the appropriate argument slot GEP. 3602 llvm::Instruction *Placeholder = 3603 cast<llvm::Instruction>(RV.getAggregatePointer()); 3604 CGBuilderTy::InsertPoint IP = Builder.saveIP(); 3605 Builder.SetInsertPoint(Placeholder); 3606 Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex()); 3607 Builder.restoreIP(IP); 3608 deferPlaceholderReplacement(Placeholder, Addr.getPointer()); 3609 } else { 3610 // Store the RValue into the argument struct. 3611 Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex()); 3612 unsigned AS = Addr.getType()->getPointerAddressSpace(); 3613 llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS); 3614 // There are some cases where a trivial bitcast is not avoidable. The 3615 // definition of a type later in a translation unit may change it's type 3616 // from {}* to (%struct.foo*)*. 3617 if (Addr.getType() != MemType) 3618 Addr = Builder.CreateBitCast(Addr, MemType); 3619 LValue argLV = MakeAddrLValue(Addr, I->Ty); 3620 EmitInitStoreOfNonAggregate(*this, RV, argLV); 3621 } 3622 break; 3623 } 3624 3625 case ABIArgInfo::Indirect: { 3626 assert(NumIRArgs == 1); 3627 if (RV.isScalar() || RV.isComplex()) { 3628 // Make a temporary alloca to pass the argument. 3629 Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign()); 3630 IRCallArgs[FirstIRArg] = Addr.getPointer(); 3631 3632 LValue argLV = MakeAddrLValue(Addr, I->Ty); 3633 EmitInitStoreOfNonAggregate(*this, RV, argLV); 3634 } else { 3635 // We want to avoid creating an unnecessary temporary+copy here; 3636 // however, we need one in three cases: 3637 // 1. 
If the argument is not byval, and we are required to copy the 3638 // source. (This case doesn't occur on any common architecture.) 3639 // 2. If the argument is byval, RV is not sufficiently aligned, and 3640 // we cannot force it to be sufficiently aligned. 3641 // 3. If the argument is byval, but RV is located in an address space 3642 // different than that of the argument (0). 3643 Address Addr = RV.getAggregateAddress(); 3644 CharUnits Align = ArgInfo.getIndirectAlign(); 3645 const llvm::DataLayout *TD = &CGM.getDataLayout(); 3646 const unsigned RVAddrSpace = Addr.getType()->getAddressSpace(); 3647 const unsigned ArgAddrSpace = 3648 (FirstIRArg < IRFuncTy->getNumParams() 3649 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() 3650 : 0); 3651 if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) || 3652 (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align && 3653 llvm::getOrEnforceKnownAlignment(Addr.getPointer(), 3654 Align.getQuantity(), *TD) 3655 < Align.getQuantity()) || 3656 (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) { 3657 // Create an aligned temporary, and copy to it. 3658 Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign()); 3659 IRCallArgs[FirstIRArg] = AI.getPointer(); 3660 EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified()); 3661 } else { 3662 // Skip the extra memcpy call. 3663 IRCallArgs[FirstIRArg] = Addr.getPointer(); 3664 } 3665 } 3666 break; 3667 } 3668 3669 case ABIArgInfo::Ignore: 3670 assert(NumIRArgs == 0); 3671 break; 3672 3673 case ABIArgInfo::Extend: 3674 case ABIArgInfo::Direct: { 3675 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) && 3676 ArgInfo.getCoerceToType() == ConvertType(info_it->type) && 3677 ArgInfo.getDirectOffset() == 0) { 3678 assert(NumIRArgs == 1); 3679 llvm::Value *V; 3680 if (RV.isScalar()) 3681 V = RV.getScalarVal(); 3682 else 3683 V = Builder.CreateLoad(RV.getAggregateAddress()); 3684 3685 // Implement swifterror by copying into a new swifterror argument. 3686 // We'll write back in the normal path out of the call. 3687 if (CallInfo.getExtParameterInfo(ArgNo).getABI() 3688 == ParameterABI::SwiftErrorResult) { 3689 assert(!swiftErrorTemp.isValid() && "multiple swifterror args"); 3690 3691 QualType pointeeTy = I->Ty->getPointeeType(); 3692 swiftErrorArg = 3693 Address(V, getContext().getTypeAlignInChars(pointeeTy)); 3694 3695 swiftErrorTemp = 3696 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 3697 V = swiftErrorTemp.getPointer(); 3698 cast<llvm::AllocaInst>(V)->setSwiftError(true); 3699 3700 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg); 3701 Builder.CreateStore(errorValue, swiftErrorTemp); 3702 } 3703 3704 // We might have to widen integers, but we should never truncate. 3705 if (ArgInfo.getCoerceToType() != V->getType() && 3706 V->getType()->isIntegerTy()) 3707 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType()); 3708 3709 // If the argument doesn't match, perform a bitcast to coerce it. This 3710 // can happen due to trivial type mismatches. 3711 if (FirstIRArg < IRFuncTy->getNumParams() && 3712 V->getType() != IRFuncTy->getParamType(FirstIRArg)) 3713 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); 3714 3715 IRCallArgs[FirstIRArg] = V; 3716 break; 3717 } 3718 3719 // FIXME: Avoid the conversion through memory if possible. 
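      // Scalar and complex values are first spilled to a "coerce" temporary so
      // the ABI coercion type can simply be loaded from memory; aggregates
      // reuse their existing slot. If the coercion type is a flattenable
      // struct, each element is loaded and passed as a separate IR argument
      // below.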
3720 Address Src = Address::invalid(); 3721 if (RV.isScalar() || RV.isComplex()) { 3722 Src = CreateMemTemp(I->Ty, "coerce"); 3723 LValue SrcLV = MakeAddrLValue(Src, I->Ty); 3724 EmitInitStoreOfNonAggregate(*this, RV, SrcLV); 3725 } else { 3726 Src = RV.getAggregateAddress(); 3727 } 3728 3729 // If the value is offset in memory, apply the offset now. 3730 Src = emitAddressAtOffset(*this, Src, ArgInfo); 3731 3732 // Fast-isel and the optimizer generally like scalar values better than 3733 // FCAs, so we flatten them if this is safe to do for this argument. 3734 llvm::StructType *STy = 3735 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType()); 3736 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 3737 llvm::Type *SrcTy = Src.getType()->getElementType(); 3738 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); 3739 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); 3740 3741 // If the source type is smaller than the destination type of the 3742 // coerce-to logic, copy the source value into a temp alloca the size 3743 // of the destination type to allow loading all of it. The bits past 3744 // the source value are left undef. 3745 if (SrcSize < DstSize) { 3746 Address TempAlloca 3747 = CreateTempAlloca(STy, Src.getAlignment(), 3748 Src.getName() + ".coerce"); 3749 Builder.CreateMemCpy(TempAlloca, Src, SrcSize); 3750 Src = TempAlloca; 3751 } else { 3752 Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy)); 3753 } 3754 3755 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy); 3756 assert(NumIRArgs == STy->getNumElements()); 3757 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 3758 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i)); 3759 Address EltPtr = Builder.CreateStructGEP(Src, i, Offset); 3760 llvm::Value *LI = Builder.CreateLoad(EltPtr); 3761 IRCallArgs[FirstIRArg + i] = LI; 3762 } 3763 } else { 3764 // In the simple case, just pass the coerced loaded value. 3765 assert(NumIRArgs == 1); 3766 IRCallArgs[FirstIRArg] = 3767 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this); 3768 } 3769 3770 break; 3771 } 3772 3773 case ABIArgInfo::CoerceAndExpand: { 3774 auto coercionType = ArgInfo.getCoerceAndExpandType(); 3775 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 3776 3777 llvm::Value *tempSize = nullptr; 3778 Address addr = Address::invalid(); 3779 if (RV.isAggregate()) { 3780 addr = RV.getAggregateAddress(); 3781 } else { 3782 assert(RV.isScalar()); // complex should always just be direct 3783 3784 llvm::Type *scalarType = RV.getScalarVal()->getType(); 3785 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType); 3786 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType); 3787 3788 tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize); 3789 3790 // Materialize to a temporary. 
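        // The temporary must be aligned for both the scalar store below and
        // the coerced element loads, hence the max of the coercion struct's
        // alignment and the scalar's preferred alignment; its lifetime is
        // bracketed by the lifetime.start here and the lifetime.end after the
        // element loads.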
3791 addr = CreateTempAlloca(RV.getScalarVal()->getType(), 3792 CharUnits::fromQuantity(std::max(layout->getAlignment(), 3793 scalarAlign))); 3794 EmitLifetimeStart(scalarSize, addr.getPointer()); 3795 3796 Builder.CreateStore(RV.getScalarVal(), addr); 3797 } 3798 3799 addr = Builder.CreateElementBitCast(addr, coercionType); 3800 3801 unsigned IRArgPos = FirstIRArg; 3802 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 3803 llvm::Type *eltType = coercionType->getElementType(i); 3804 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; 3805 Address eltAddr = Builder.CreateStructGEP(addr, i, layout); 3806 llvm::Value *elt = Builder.CreateLoad(eltAddr); 3807 IRCallArgs[IRArgPos++] = elt; 3808 } 3809 assert(IRArgPos == FirstIRArg + NumIRArgs); 3810 3811 if (tempSize) { 3812 EmitLifetimeEnd(tempSize, addr.getPointer()); 3813 } 3814 3815 break; 3816 } 3817 3818 case ABIArgInfo::Expand: 3819 unsigned IRArgPos = FirstIRArg; 3820 ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos); 3821 assert(IRArgPos == FirstIRArg + NumIRArgs); 3822 break; 3823 } 3824 } 3825 3826 if (ArgMemory.isValid()) { 3827 llvm::Value *Arg = ArgMemory.getPointer(); 3828 if (CallInfo.isVariadic()) { 3829 // When passing non-POD arguments by value to variadic functions, we will 3830 // end up with a variadic prototype and an inalloca call site. In such 3831 // cases, we can't do any parameter mismatch checks. Give up and bitcast 3832 // the callee. 3833 unsigned CalleeAS = 3834 cast<llvm::PointerType>(Callee->getType())->getAddressSpace(); 3835 Callee = Builder.CreateBitCast( 3836 Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS)); 3837 } else { 3838 llvm::Type *LastParamTy = 3839 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1); 3840 if (Arg->getType() != LastParamTy) { 3841 #ifndef NDEBUG 3842 // Assert that these structs have equivalent element types. 3843 llvm::StructType *FullTy = CallInfo.getArgStruct(); 3844 llvm::StructType *DeclaredTy = cast<llvm::StructType>( 3845 cast<llvm::PointerType>(LastParamTy)->getElementType()); 3846 assert(DeclaredTy->getNumElements() == FullTy->getNumElements()); 3847 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(), 3848 DE = DeclaredTy->element_end(), 3849 FI = FullTy->element_begin(); 3850 DI != DE; ++DI, ++FI) 3851 assert(*DI == *FI); 3852 #endif 3853 Arg = Builder.CreateBitCast(Arg, LastParamTy); 3854 } 3855 } 3856 assert(IRFunctionArgs.hasInallocaArg()); 3857 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg; 3858 } 3859 3860 if (!CallArgs.getCleanupsToDeactivate().empty()) 3861 deactivateArgCleanupsBeforeCall(*this, CallArgs); 3862 3863 // If the callee is a bitcast of a function to a varargs pointer to function 3864 // type, check to see if we can remove the bitcast. This handles some cases 3865 // with unprototyped functions. 
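  // For example (illustrative): with a K&R-style declaration
  //   void f();      /* no prototype */
  //   f(1, 2);
  // the callee here is typically a constant bitcast of @f to a prototyped
  // function pointer type; if the operand types happen to line up we can strip
  // the cast and call @f directly (see ArgsMatch below).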
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == IRCallArgs.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it. This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // Inalloca argument can have different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(Callee->getName(), CallInfo, CalleeInfo,
                             AttributeList, CallingConv,
                             /*AttrOnCallSite=*/true);
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
                                                     AttributeList);

  bool CannotThrow;
  if (currentFunctionUsesSEHTry()) {
    // SEH cares about asynchronous exceptions, so everything can "throw."
    CannotThrow = false;
  } else if (isCleanupPadScope() &&
             EHPersonality::get(*this).isMSVCXXPersonality()) {
    // The MSVC++ personality will implicitly terminate the program if an
    // exception is thrown. An unwind edge cannot be reached.
    CannotThrow = true;
  } else {
    // Otherwise, nounwind call sites will never throw.
    CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
                                     llvm::Attribute::NoUnwind);
  }
  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();

  SmallVector<llvm::OperandBundleDef, 1> BundleList;
  getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !CS.hasFnAttr(llvm::Attribute::NoInline))
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::AlwaysInline);

  // Disable inlining inside SEH __try blocks.
3949 if (isSEHTryScope()) 3950 Attrs = 3951 Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex, 3952 llvm::Attribute::NoInline); 3953 3954 CS.setAttributes(Attrs); 3955 CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); 3956 3957 // Insert instrumentation or attach profile metadata at indirect call sites. 3958 // For more details, see the comment before the definition of 3959 // IPVK_IndirectCallTarget in InstrProfData.inc. 3960 if (!CS.getCalledFunction()) 3961 PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget, 3962 CS.getInstruction(), Callee); 3963 3964 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3965 // optimizer it can aggressively ignore unwind edges. 3966 if (CGM.getLangOpts().ObjCAutoRefCount) 3967 AddObjCARCExceptionMetadata(CS.getInstruction()); 3968 3969 // If the call doesn't return, finish the basic block and clear the 3970 // insertion point; this allows the rest of IRgen to discard 3971 // unreachable code. 3972 if (CS.doesNotReturn()) { 3973 if (UnusedReturnSize) 3974 EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize), 3975 SRetPtr.getPointer()); 3976 3977 Builder.CreateUnreachable(); 3978 Builder.ClearInsertionPoint(); 3979 3980 // FIXME: For now, emit a dummy basic block because expr emitters in 3981 // generally are not ready to handle emitting expressions at unreachable 3982 // points. 3983 EnsureInsertPoint(); 3984 3985 // Return a reasonable RValue. 3986 return GetUndefRValue(RetTy); 3987 } 3988 3989 llvm::Instruction *CI = CS.getInstruction(); 3990 if (!CI->getType()->isVoidTy()) 3991 CI->setName("call"); 3992 3993 // Perform the swifterror writeback. 3994 if (swiftErrorTemp.isValid()) { 3995 llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp); 3996 Builder.CreateStore(errorResult, swiftErrorArg); 3997 } 3998 3999 // Emit any writebacks immediately. Arguably this should happen 4000 // after any return-value munging. 4001 if (CallArgs.hasWritebacks()) 4002 emitWritebacks(*this, CallArgs); 4003 4004 // The stack cleanup for inalloca arguments has to run out of the normal 4005 // lexical order, so deactivate it and run it manually here. 
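  // freeArgumentMemory() emits llvm.stackrestore with the llvm.stacksave token
  // recorded by allocateArgumentMemory(), releasing any inalloca stack space
  // now that the call has completed.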
4006 CallArgs.freeArgumentMemory(*this); 4007 4008 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) { 4009 const Decl *TargetDecl = CalleeInfo.getCalleeDecl(); 4010 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>()) 4011 Call->setTailCallKind(llvm::CallInst::TCK_NoTail); 4012 } 4013 4014 RValue Ret = [&] { 4015 switch (RetAI.getKind()) { 4016 case ABIArgInfo::CoerceAndExpand: { 4017 auto coercionType = RetAI.getCoerceAndExpandType(); 4018 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 4019 4020 Address addr = SRetPtr; 4021 addr = Builder.CreateElementBitCast(addr, coercionType); 4022 4023 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType()); 4024 bool requiresExtract = isa<llvm::StructType>(CI->getType()); 4025 4026 unsigned unpaddedIndex = 0; 4027 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 4028 llvm::Type *eltType = coercionType->getElementType(i); 4029 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; 4030 Address eltAddr = Builder.CreateStructGEP(addr, i, layout); 4031 llvm::Value *elt = CI; 4032 if (requiresExtract) 4033 elt = Builder.CreateExtractValue(elt, unpaddedIndex++); 4034 else 4035 assert(unpaddedIndex == 0); 4036 Builder.CreateStore(elt, eltAddr); 4037 } 4038 // FALLTHROUGH 4039 } 4040 4041 case ABIArgInfo::InAlloca: 4042 case ABIArgInfo::Indirect: { 4043 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation()); 4044 if (UnusedReturnSize) 4045 EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize), 4046 SRetPtr.getPointer()); 4047 return ret; 4048 } 4049 4050 case ABIArgInfo::Ignore: 4051 // If we are ignoring an argument that had a result, make sure to 4052 // construct the appropriate return value for our caller. 4053 return GetUndefRValue(RetTy); 4054 4055 case ABIArgInfo::Extend: 4056 case ABIArgInfo::Direct: { 4057 llvm::Type *RetIRTy = ConvertType(RetTy); 4058 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) { 4059 switch (getEvaluationKind(RetTy)) { 4060 case TEK_Complex: { 4061 llvm::Value *Real = Builder.CreateExtractValue(CI, 0); 4062 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1); 4063 return RValue::getComplex(std::make_pair(Real, Imag)); 4064 } 4065 case TEK_Aggregate: { 4066 Address DestPtr = ReturnValue.getValue(); 4067 bool DestIsVolatile = ReturnValue.isVolatile(); 4068 4069 if (!DestPtr.isValid()) { 4070 DestPtr = CreateMemTemp(RetTy, "agg.tmp"); 4071 DestIsVolatile = false; 4072 } 4073 BuildAggStore(*this, CI, DestPtr, DestIsVolatile); 4074 return RValue::getAggregate(DestPtr); 4075 } 4076 case TEK_Scalar: { 4077 // If the argument doesn't match, perform a bitcast to coerce it. This 4078 // can happen due to trivial type mismatches. 4079 llvm::Value *V = CI; 4080 if (V->getType() != RetIRTy) 4081 V = Builder.CreateBitCast(V, RetIRTy); 4082 return RValue::get(V); 4083 } 4084 } 4085 llvm_unreachable("bad evaluation kind"); 4086 } 4087 4088 Address DestPtr = ReturnValue.getValue(); 4089 bool DestIsVolatile = ReturnValue.isVolatile(); 4090 4091 if (!DestPtr.isValid()) { 4092 DestPtr = CreateMemTemp(RetTy, "coerce"); 4093 DestIsVolatile = false; 4094 } 4095 4096 // If the value is offset in memory, apply the offset now. 
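      // For ABIs where getDirectOffset() is non-zero the returned value lives
      // at an offset inside the coerced type, and emitAddressAtOffset adjusts
      // the destination pointer accordingly before the coerced store.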
4097 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI); 4098 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this); 4099 4100 return convertTempToRValue(DestPtr, RetTy, SourceLocation()); 4101 } 4102 4103 case ABIArgInfo::Expand: 4104 llvm_unreachable("Invalid ABI kind for return argument"); 4105 } 4106 4107 llvm_unreachable("Unhandled ABIArgInfo::Kind"); 4108 } (); 4109 4110 const Decl *TargetDecl = CalleeInfo.getCalleeDecl(); 4111 4112 if (Ret.isScalar() && TargetDecl) { 4113 if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) { 4114 llvm::Value *OffsetValue = nullptr; 4115 if (const auto *Offset = AA->getOffset()) 4116 OffsetValue = EmitScalarExpr(Offset); 4117 4118 llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment()); 4119 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment); 4120 EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(), 4121 OffsetValue); 4122 } 4123 } 4124 4125 return Ret; 4126 } 4127 4128 /* VarArg handling */ 4129 4130 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) { 4131 VAListAddr = VE->isMicrosoftABI() 4132 ? EmitMSVAListRef(VE->getSubExpr()) 4133 : EmitVAListRef(VE->getSubExpr()); 4134 QualType Ty = VE->getType(); 4135 if (VE->isMicrosoftABI()) 4136 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty); 4137 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty); 4138 } 4139