//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
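/// For example, a call through a K&R-style `int f();` declaration in C is
/// arranged as if the callee were variadic with zero required arguments,
/// since nothing is known about its parameters at the call site.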
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT,
                                 const FunctionDecl *FD) {
  // Fill out paramInfos.
  if (FPT->hasExtParameterInfos() || !paramInfos.empty()) {
    assert(paramInfos.size() <= prefix.size());
    auto protoParamInfos = FPT->getExtParameterInfos();
    paramInfos.reserve(prefix.size() + protoParamInfos.size());
    paramInfos.resize(prefix.size());
    paramInfos.append(protoParamInfos.begin(), protoParamInfos.end());
  }

  // Fast path: unknown target.
  if (FD == nullptr) {
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  assert(FD->getNumParams() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (FD->getParamDecl(I)->hasAttr<PassObjectSizeAttr>())
      prefix.push_back(CGT.getContext().getSizeType());
  }
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP, FD);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
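  // The checks below are ordered; e.g. a declaration such as
  //   void __attribute__((stdcall)) f(void);
  // takes the CC_X86StdCall path, and a decl with no calling convention
  // attribute at all falls through to the default CC_C.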
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (Zero value of RD means we don't have any meaningful "this" argument type,
///  so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, /*instanceMethod=*/true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  appendParameterTypes(*this, argTypes, paramInfos, FTP, MD);

  TheCXXABI.buildStructorSignature(MD, Type, argTypes);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static void addExtParameterInfosForCall(
        llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                        const FunctionProtoType *proto,
                                        unsigned prefixArgs,
                                        unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  auto protoInfos = proto->getExtParameterInfos();
  paramInfos.append(protoInfos.begin(), protoInfos.end());

  // Add default infos for the variadic arguments.
  paramInfos.resize(totalArgs);
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ constructor, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  auto ParamInfos = getExtParameterInfosForCall(FPT.getTypePtr(), 1 + ExtraArgs,
                                                ArgTypes.size());
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
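  // (Contrast with arrangeFreeFunctionType above: a *call* through an
  // unprototyped type is arranged as variadic, but the declaration itself
  // is arranged as non-variadic with all arguments required.)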
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->params()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, {}, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs.  Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
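/// (For example, whether an unprototyped call uses the variadic convention
/// is a per-target decision; see the isNoProtoCallVariadic check in
/// arrangeFreeFunctionLikeCall above.)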
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 /*instanceMethod*/ false, /*chainCall*/ false,
                                 argTypes, proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                               ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required) {
  unsigned numRequiredArgs =
      (proto->isVariadic() ? required.getNumRequiredArgs() : args.size());
  unsigned numPrefixArgs = numRequiredArgs - proto->getNumParams();
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info.  We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
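  // For example, a Direct-classified argument whose coerce type was left
  // unset by the target ABI picks up its converted LLVM type (e.g. i32 for
  // 'int') here.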
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
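        // (A zero-width bit-field only affects layout and carries no value,
        // so there is nothing to pass for it.)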
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForField(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
                              [&](Address EltAddr) {
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = RV.getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF,
                                           llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to
/// get at its inner goodness.  Dive as deep as possible without entering an
/// element with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
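  // For example, loading 4 bytes out of an { i32, i8 } struct pointer can be
  // narrowed to its leading i32 element, while a request for more bytes than
  // the first element holds leaves the pointer untouched.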
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where
/// both are either integers or pointers.  This does a truncation of the value
/// if it is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
        CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.  The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(
        addr, CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
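    // For example, a Direct argument flattened to { i64, i64 } occupies two
    // IR arguments, while an Ignore argument occupies none; see construct()
    // below.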
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns the index of the first IR argument corresponding to ArgNo, and
  /// their quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
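      // (An inalloca argument instead travels in the shared argument-memory
      // struct appended as the trailing IR argument at the end of this
      // function.)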
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second.  We already handled
    // it above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect:
  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::CoerceAndExpand:
    resultType = retAI.getUnpaddedCoerceAndExpandType();
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo();
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
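      // For example, a struct coerced to { i32, i32 } becomes two scalar i32
      // parameters here instead of one first-class aggregate value.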
1523       llvm::Type *argType = ArgInfo.getCoerceToType();
1524       llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1525       if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1526         assert(NumIRArgs == st->getNumElements());
1527         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1528           ArgTypes[FirstIRArg + i] = st->getElementType(i);
1529       } else {
1530         assert(NumIRArgs == 1);
1531         ArgTypes[FirstIRArg] = argType;
1532       }
1533       break;
1534     }
1535
1536     case ABIArgInfo::CoerceAndExpand: {
1537       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1538       for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1539         *ArgTypesIter++ = EltTy;
1540       }
1541       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1542       break;
1543     }
1544
1545     case ABIArgInfo::Expand:
1546       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1547       getExpandedTypes(it->type, ArgTypesIter);
1548       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1549       break;
1550     }
1551   }
1552
1553   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1554   assert(Erased && "Not in set?");
1555
1556   return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1557 }
1558
1559 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1560   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1561   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1562
1563   if (!isFuncTypeConvertible(FPT))
1564     return llvm::StructType::get(getLLVMContext());
1565
1566   const CGFunctionInfo *Info;
1567   if (isa<CXXDestructorDecl>(MD))
1568     Info =
1569         &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
1570   else
1571     Info = &arrangeCXXMethodDeclaration(MD);
1572   return GetFunctionType(*Info);
1573 }
1574
1575 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1576                                                llvm::AttrBuilder &FuncAttrs,
1577                                                const FunctionProtoType *FPT) {
1578   if (!FPT)
1579     return;
1580
1581   if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1582       FPT->isNothrow(Ctx))
1583     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1584 }
1585
1586 void CodeGenModule::ConstructAttributeList(
1587     StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1588     AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
1589   llvm::AttrBuilder FuncAttrs;
1590   llvm::AttrBuilder RetAttrs;
1591   bool HasOptnone = false;
1592
1593   CallingConv = FI.getEffectiveCallingConvention();
1594
1595   if (FI.isNoReturn())
1596     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1597
1598   // If we have information about the function prototype, we can learn
1599   // attributes from there.
1600   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1601                                      CalleeInfo.getCalleeFunctionProtoType());
1602
1603   const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1604
1605   bool HasAnyX86InterruptAttr = false;
1606   // FIXME: handle sseregparm someday...
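  // Translate attributes stated on the declaration into their IR
  // counterparts, e.g. __attribute__((returns_twice)) -> ReturnsTwice and
  // __attribute__((noreturn)) -> NoReturn.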
1607   if (TargetDecl) {
1608     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1609       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1610     if (TargetDecl->hasAttr<NoThrowAttr>())
1611       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1612     if (TargetDecl->hasAttr<NoReturnAttr>())
1613       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1614     if (TargetDecl->hasAttr<NoDuplicateAttr>())
1615       FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1616
1617     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1618       AddAttributesFromFunctionProtoType(
1619           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1620       // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1621       // These attributes are not inherited by overriders.
1622       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1623       if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1624         FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1625     }
1626
1627     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1628     if (TargetDecl->hasAttr<ConstAttr>()) {
1629       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1630       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1631     } else if (TargetDecl->hasAttr<PureAttr>()) {
1632       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1633       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1634     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1635       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1636       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1637     }
1638     if (TargetDecl->hasAttr<RestrictAttr>())
1639       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1640     if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
1641       RetAttrs.addAttribute(llvm::Attribute::NonNull);
1642
1643     HasAnyX86InterruptAttr = TargetDecl->hasAttr<AnyX86InterruptAttr>();
1644     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1645   }
1646
1647   // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1648   if (!HasOptnone) {
1649     if (CodeGenOpts.OptimizeSize)
1650       FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1651     if (CodeGenOpts.OptimizeSize == 2)
1652       FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1653   }
1654
1655   if (CodeGenOpts.DisableRedZone)
1656     FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1657   if (CodeGenOpts.NoImplicitFloat)
1658     FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1659   if (CodeGenOpts.EnableSegmentedStacks &&
1660       !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1661     FuncAttrs.addAttribute("split-stack");
1662
1663   if (AttrOnCallSite) {
1664     // Attributes that should go on the call site only.
1665     if (!CodeGenOpts.SimplifyLibCalls ||
1666         CodeGenOpts.isNoBuiltinFunc(Name.data()))
1667       FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1668     if (!CodeGenOpts.TrapFuncName.empty())
1669       FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1670   } else {
1671     // Attributes that should go on the function, but not the call site.
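    // For example, whether frame pointers are kept (-fno-omit-frame-pointer)
    // or eliminated is communicated to the backend through the
    // "no-frame-pointer-elim" string attributes below.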
1672 if (!CodeGenOpts.DisableFPElim) { 1673 FuncAttrs.addAttribute("no-frame-pointer-elim", "false"); 1674 } else if (CodeGenOpts.OmitLeafFramePointer) { 1675 FuncAttrs.addAttribute("no-frame-pointer-elim", "false"); 1676 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf"); 1677 } else { 1678 FuncAttrs.addAttribute("no-frame-pointer-elim", "true"); 1679 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf"); 1680 } 1681 1682 bool DisableTailCalls = 1683 CodeGenOpts.DisableTailCalls || HasAnyX86InterruptAttr || 1684 (TargetDecl && TargetDecl->hasAttr<DisableTailCallsAttr>()); 1685 FuncAttrs.addAttribute( 1686 "disable-tail-calls", 1687 llvm::toStringRef(DisableTailCalls)); 1688 1689 FuncAttrs.addAttribute("less-precise-fpmad", 1690 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD)); 1691 FuncAttrs.addAttribute("no-infs-fp-math", 1692 llvm::toStringRef(CodeGenOpts.NoInfsFPMath)); 1693 FuncAttrs.addAttribute("no-nans-fp-math", 1694 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath)); 1695 FuncAttrs.addAttribute("unsafe-fp-math", 1696 llvm::toStringRef(CodeGenOpts.UnsafeFPMath)); 1697 FuncAttrs.addAttribute("use-soft-float", 1698 llvm::toStringRef(CodeGenOpts.SoftFloat)); 1699 FuncAttrs.addAttribute("stack-protector-buffer-size", 1700 llvm::utostr(CodeGenOpts.SSPBufferSize)); 1701 1702 if (CodeGenOpts.StackRealignment) 1703 FuncAttrs.addAttribute("stackrealign"); 1704 1705 // Add target-cpu and target-features attributes to functions. If 1706 // we have a decl for the function and it has a target attribute then 1707 // parse that and add it to the feature set. 1708 StringRef TargetCPU = getTarget().getTargetOpts().CPU; 1709 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl); 1710 if (FD && FD->hasAttr<TargetAttr>()) { 1711 llvm::StringMap<bool> FeatureMap; 1712 getFunctionFeatureMap(FeatureMap, FD); 1713 1714 // Produce the canonical string for this set of features. 1715 std::vector<std::string> Features; 1716 for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(), 1717 ie = FeatureMap.end(); 1718 it != ie; ++it) 1719 Features.push_back((it->second ? "+" : "-") + it->first().str()); 1720 1721 // Now add the target-cpu and target-features to the function. 1722 // While we populated the feature map above, we still need to 1723 // get and parse the target attribute so we can get the cpu for 1724 // the function. 1725 const auto *TD = FD->getAttr<TargetAttr>(); 1726 TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse(); 1727 if (ParsedAttr.second != "") 1728 TargetCPU = ParsedAttr.second; 1729 if (TargetCPU != "") 1730 FuncAttrs.addAttribute("target-cpu", TargetCPU); 1731 if (!Features.empty()) { 1732 std::sort(Features.begin(), Features.end()); 1733 FuncAttrs.addAttribute( 1734 "target-features", 1735 llvm::join(Features.begin(), Features.end(), ",")); 1736 } 1737 } else { 1738 // Otherwise just add the existing target cpu and target features to the 1739 // function. 
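      // Either way, the result is a sorted, comma-joined list, e.g.
      // "target-features"="+avx,+sse4.2" (illustrative feature names).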
1740 std::vector<std::string> &Features = getTarget().getTargetOpts().Features; 1741 if (TargetCPU != "") 1742 FuncAttrs.addAttribute("target-cpu", TargetCPU); 1743 if (!Features.empty()) { 1744 std::sort(Features.begin(), Features.end()); 1745 FuncAttrs.addAttribute( 1746 "target-features", 1747 llvm::join(Features.begin(), Features.end(), ",")); 1748 } 1749 } 1750 } 1751 1752 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { 1753 // Conservatively, mark all functions and calls in CUDA as convergent 1754 // (meaning, they may call an intrinsically convergent op, such as 1755 // __syncthreads(), and so can't have certain optimizations applied around 1756 // them). LLVM will remove this attribute where it safely can. 1757 FuncAttrs.addAttribute(llvm::Attribute::Convergent); 1758 } 1759 1760 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); 1761 1762 QualType RetTy = FI.getReturnType(); 1763 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1764 switch (RetAI.getKind()) { 1765 case ABIArgInfo::Extend: 1766 if (RetTy->hasSignedIntegerRepresentation()) 1767 RetAttrs.addAttribute(llvm::Attribute::SExt); 1768 else if (RetTy->hasUnsignedIntegerRepresentation()) 1769 RetAttrs.addAttribute(llvm::Attribute::ZExt); 1770 // FALL THROUGH 1771 case ABIArgInfo::Direct: 1772 if (RetAI.getInReg()) 1773 RetAttrs.addAttribute(llvm::Attribute::InReg); 1774 break; 1775 case ABIArgInfo::Ignore: 1776 break; 1777 1778 case ABIArgInfo::InAlloca: 1779 case ABIArgInfo::Indirect: { 1780 // inalloca and sret disable readnone and readonly 1781 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1782 .removeAttribute(llvm::Attribute::ReadNone); 1783 break; 1784 } 1785 1786 case ABIArgInfo::CoerceAndExpand: 1787 break; 1788 1789 case ABIArgInfo::Expand: 1790 llvm_unreachable("Invalid ABI kind for return argument"); 1791 } 1792 1793 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { 1794 QualType PTy = RefTy->getPointeeType(); 1795 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 1796 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 1797 .getQuantity()); 1798 else if (getContext().getTargetAddressSpace(PTy) == 0) 1799 RetAttrs.addAttribute(llvm::Attribute::NonNull); 1800 } 1801 1802 // Attach return attributes. 1803 if (RetAttrs.hasAttributes()) { 1804 PAL.push_back(llvm::AttributeSet::get( 1805 getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs)); 1806 } 1807 1808 // Attach attributes to sret. 1809 if (IRFunctionArgs.hasSRetArg()) { 1810 llvm::AttrBuilder SRETAttrs; 1811 SRETAttrs.addAttribute(llvm::Attribute::StructRet); 1812 if (RetAI.getInReg()) 1813 SRETAttrs.addAttribute(llvm::Attribute::InReg); 1814 PAL.push_back(llvm::AttributeSet::get( 1815 getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs)); 1816 } 1817 1818 // Attach attributes to inalloca argument. 1819 if (IRFunctionArgs.hasInallocaArg()) { 1820 llvm::AttrBuilder Attrs; 1821 Attrs.addAttribute(llvm::Attribute::InAlloca); 1822 PAL.push_back(llvm::AttributeSet::get( 1823 getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs)); 1824 } 1825 1826 unsigned ArgNo = 0; 1827 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 1828 E = FI.arg_end(); 1829 I != E; ++I, ++ArgNo) { 1830 QualType ParamType = I->type; 1831 const ABIArgInfo &AI = I->info; 1832 llvm::AttrBuilder Attrs; 1833 1834 // Add attribute for padding argument, if necessary. 
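    // (A padding argument is an extra IR argument that carries no
    // source-level value; some ABIs insert one purely so the next argument
    // lands in the right register or stack slot.)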
1835 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 1836 if (AI.getPaddingInReg()) 1837 PAL.push_back(llvm::AttributeSet::get( 1838 getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1, 1839 llvm::Attribute::InReg)); 1840 } 1841 1842 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 1843 // have the corresponding parameter variable. It doesn't make 1844 // sense to do it here because parameters are so messed up. 1845 switch (AI.getKind()) { 1846 case ABIArgInfo::Extend: 1847 if (ParamType->isSignedIntegerOrEnumerationType()) 1848 Attrs.addAttribute(llvm::Attribute::SExt); 1849 else if (ParamType->isUnsignedIntegerOrEnumerationType()) { 1850 if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType)) 1851 Attrs.addAttribute(llvm::Attribute::SExt); 1852 else 1853 Attrs.addAttribute(llvm::Attribute::ZExt); 1854 } 1855 // FALL THROUGH 1856 case ABIArgInfo::Direct: 1857 if (ArgNo == 0 && FI.isChainCall()) 1858 Attrs.addAttribute(llvm::Attribute::Nest); 1859 else if (AI.getInReg()) 1860 Attrs.addAttribute(llvm::Attribute::InReg); 1861 break; 1862 1863 case ABIArgInfo::Indirect: { 1864 if (AI.getInReg()) 1865 Attrs.addAttribute(llvm::Attribute::InReg); 1866 1867 if (AI.getIndirectByVal()) 1868 Attrs.addAttribute(llvm::Attribute::ByVal); 1869 1870 CharUnits Align = AI.getIndirectAlign(); 1871 1872 // In a byval argument, it is important that the required 1873 // alignment of the type is honored, as LLVM might be creating a 1874 // *new* stack object, and needs to know what alignment to give 1875 // it. (Sometimes it can deduce a sensible alignment on its own, 1876 // but not if clang decides it must emit a packed struct, or the 1877 // user specifies increased alignment requirements.) 1878 // 1879 // This is different from indirect *not* byval, where the object 1880 // exists already, and the align attribute is purely 1881 // informative. 1882 assert(!Align.isZero()); 1883 1884 // For now, only add this when we have a byval argument. 1885 // TODO: be less lazy about updating test cases. 1886 if (AI.getIndirectByVal()) 1887 Attrs.addAlignmentAttr(Align.getQuantity()); 1888 1889 // byval disables readnone and readonly. 1890 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1891 .removeAttribute(llvm::Attribute::ReadNone); 1892 break; 1893 } 1894 case ABIArgInfo::Ignore: 1895 case ABIArgInfo::Expand: 1896 case ABIArgInfo::CoerceAndExpand: 1897 break; 1898 1899 case ABIArgInfo::InAlloca: 1900 // inalloca disables readnone and readonly. 
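      // (Arguments in the inalloca area are reached through memory, which is
      // incompatible with a readnone/readonly function.)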
1901       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1902           .removeAttribute(llvm::Attribute::ReadNone);
1903       continue;
1904     }
1905
1906     if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
1907       QualType PTy = RefTy->getPointeeType();
1908       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1909         Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1910                                          .getQuantity());
1911       else if (getContext().getTargetAddressSpace(PTy) == 0)
1912         Attrs.addAttribute(llvm::Attribute::NonNull);
1913     }
1914
1915     if (Attrs.hasAttributes()) {
1916       unsigned FirstIRArg, NumIRArgs;
1917       std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1918       for (unsigned i = 0; i < NumIRArgs; i++)
1919         PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
1920                                               FirstIRArg + i + 1, Attrs));
1921     }
1922   }
1923   assert(ArgNo == FI.arg_size());
1924
1925   if (FuncAttrs.hasAttributes())
1926     PAL.push_back(llvm::
1927                   AttributeSet::get(getLLVMContext(),
1928                                     llvm::AttributeSet::FunctionIndex,
1929                                     FuncAttrs));
1930 }
1931
1932 /// An argument came in as a promoted argument; demote it back to its
1933 /// declared type.
1934 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
1935                                          const VarDecl *var,
1936                                          llvm::Value *value) {
1937   llvm::Type *varType = CGF.ConvertType(var->getType());
1938
1939   // This can happen with promotions that actually don't change the
1940   // underlying type, like the enum promotions.
1941   if (value->getType() == varType) return value;
1942
1943   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
1944          && "unexpected promotion type");
1945
1946   if (isa<llvm::IntegerType>(varType))
1947     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
1948
1949   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
1950 }
1951
1952 /// Returns the attribute (either parameter attribute or function
1953 /// attribute) that declares argument ArgNo to be non-null.
1954 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
1955                                          QualType ArgType, unsigned ArgNo) {
1956   // FIXME: __attribute__((nonnull)) can also be applied to:
1957   //   - references to pointers, where the pointee is known to be
1958   //     nonnull (apparently a Clang extension)
1959   //   - transparent unions containing pointers
1960   // In the former case, LLVM IR cannot represent the constraint. In
1961   // the latter case, we have no guarantee that the transparent union
1962   // is in fact passed as a pointer.
1963   if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
1964     return nullptr;
1965   // First, check attribute on parameter itself.
1966   if (PVD) {
1967     if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
1968       return ParmNNAttr;
1969   }
1970   // Check function attributes.
1971   if (!FD)
1972     return nullptr;
1973   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
1974     if (NNAttr->isNonNull(ArgNo))
1975       return NNAttr;
1976   }
1977   return nullptr;
1978 }
1979
1980 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1981                                          llvm::Function *Fn,
1982                                          const FunctionArgList &Args) {
1983   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
1984     // Naked functions don't have prologues.
1985     return;
1986
1987   // If this is an implicit-return-zero function, go ahead and
1988   // initialize the return value. TODO: it might be nice to have
1989   // a more general mechanism for this that didn't require synthesized
1990   // return statements.
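  // For example, 'int main()' in C++ may fall off the end without a return
  // statement; it is marked hasImplicitReturnZero(), so we store 0 into the
  // return slot up front.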
1991   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
1992     if (FD->hasImplicitReturnZero()) {
1993       QualType RetTy = FD->getReturnType().getUnqualifiedType();
1994       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
1995       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
1996       Builder.CreateStore(Zero, ReturnValue);
1997     }
1998   }
1999
2000   // FIXME: We no longer need the types from FunctionArgList; lift up and
2001   // simplify.
2002
2003   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2004   // Flattened function arguments.
2005   SmallVector<llvm::Argument *, 16> FnArgs;
2006   FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2007   for (auto &Arg : Fn->args()) {
2008     FnArgs.push_back(&Arg);
2009   }
2010   assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2011
2012   // If we're using inalloca, all the memory arguments are GEPs off of the last
2013   // parameter, which is a pointer to the complete memory area.
2014   Address ArgStruct = Address::invalid();
2015   const llvm::StructLayout *ArgStructLayout = nullptr;
2016   if (IRFunctionArgs.hasInallocaArg()) {
2017     ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2018     ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2019                         FI.getArgStructAlignment());
2020
2021     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2022   }
2023
2024   // Name the struct return parameter.
2025   if (IRFunctionArgs.hasSRetArg()) {
2026     auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
2027     AI->setName("agg.result");
2028     AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
2029                                         llvm::Attribute::NoAlias));
2030   }
2031
2032   // Track if we received the parameter as a pointer (indirect, byval, or
2033   // inalloca). If we already have a pointer, EmitParmDecl doesn't need to
2034   // copy it into a local alloca for us.
2035   SmallVector<ParamValue, 16> ArgVals;
2036   ArgVals.reserve(Args.size());
2037
2038   // Create a pointer value for every parameter declaration. This usually
2039   // entails copying one or more LLVM IR arguments into an alloca. Don't push
2040   // any cleanups or do anything that might unwind. We do that separately, so
2041   // we can push the cleanups in the correct order for the ABI.
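  // (For instance, the MS C++ ABI destroys arguments left-to-right in the
  // callee, so the EmitParmDecl loop at the bottom of this function runs in
  // reverse for that case.)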
2042 assert(FI.arg_size() == Args.size() && 2043 "Mismatch between function signature & arguments."); 2044 unsigned ArgNo = 0; 2045 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); 2046 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 2047 i != e; ++i, ++info_it, ++ArgNo) { 2048 const VarDecl *Arg = *i; 2049 QualType Ty = info_it->type; 2050 const ABIArgInfo &ArgI = info_it->info; 2051 2052 bool isPromoted = 2053 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted(); 2054 2055 unsigned FirstIRArg, NumIRArgs; 2056 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2057 2058 switch (ArgI.getKind()) { 2059 case ABIArgInfo::InAlloca: { 2060 assert(NumIRArgs == 0); 2061 auto FieldIndex = ArgI.getInAllocaFieldIndex(); 2062 CharUnits FieldOffset = 2063 CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex)); 2064 Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset, 2065 Arg->getName()); 2066 ArgVals.push_back(ParamValue::forIndirect(V)); 2067 break; 2068 } 2069 2070 case ABIArgInfo::Indirect: { 2071 assert(NumIRArgs == 1); 2072 Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign()); 2073 2074 if (!hasScalarEvaluationKind(Ty)) { 2075 // Aggregates and complex variables are accessed by reference. All we 2076 // need to do is realign the value, if requested. 2077 Address V = ParamAddr; 2078 if (ArgI.getIndirectRealign()) { 2079 Address AlignedTemp = CreateMemTemp(Ty, "coerce"); 2080 2081 // Copy from the incoming argument pointer to the temporary with the 2082 // appropriate alignment. 2083 // 2084 // FIXME: We should have a common utility for generating an aggregate 2085 // copy. 2086 CharUnits Size = getContext().getTypeSizeInChars(Ty); 2087 auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()); 2088 Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy); 2089 Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy); 2090 Builder.CreateMemCpy(Dst, Src, SizeVal, false); 2091 V = AlignedTemp; 2092 } 2093 ArgVals.push_back(ParamValue::forIndirect(V)); 2094 } else { 2095 // Load scalar value from indirect argument. 2096 llvm::Value *V = 2097 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart()); 2098 2099 if (isPromoted) 2100 V = emitArgumentDemotion(*this, Arg, V); 2101 ArgVals.push_back(ParamValue::forDirect(V)); 2102 } 2103 break; 2104 } 2105 2106 case ABIArgInfo::Extend: 2107 case ABIArgInfo::Direct: { 2108 2109 // If we have the trivial case, handle it with no muss and fuss. 2110 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 2111 ArgI.getCoerceToType() == ConvertType(Ty) && 2112 ArgI.getDirectOffset() == 0) { 2113 assert(NumIRArgs == 1); 2114 auto AI = FnArgs[FirstIRArg]; 2115 llvm::Value *V = AI; 2116 2117 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) { 2118 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), 2119 PVD->getFunctionScopeIndex())) 2120 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2121 AI->getArgNo() + 1, 2122 llvm::Attribute::NonNull)); 2123 2124 QualType OTy = PVD->getOriginalType(); 2125 if (const auto *ArrTy = 2126 getContext().getAsConstantArrayType(OTy)) { 2127 // A C99 array parameter declaration with the static keyword also 2128 // indicates dereferenceability, and if the size is constant we can 2129 // use the dereferenceable attribute (which requires the size in 2130 // bytes). 
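            // For example, 'void f(int a[static 4])' lets us mark 'a' as
            // dereferenceable(16) on a target where int is 4 bytes wide.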
2131 if (ArrTy->getSizeModifier() == ArrayType::Static) { 2132 QualType ETy = ArrTy->getElementType(); 2133 uint64_t ArrSize = ArrTy->getSize().getZExtValue(); 2134 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && 2135 ArrSize) { 2136 llvm::AttrBuilder Attrs; 2137 Attrs.addDereferenceableAttr( 2138 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize); 2139 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2140 AI->getArgNo() + 1, Attrs)); 2141 } else if (getContext().getTargetAddressSpace(ETy) == 0) { 2142 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2143 AI->getArgNo() + 1, 2144 llvm::Attribute::NonNull)); 2145 } 2146 } 2147 } else if (const auto *ArrTy = 2148 getContext().getAsVariableArrayType(OTy)) { 2149 // For C99 VLAs with the static keyword, we don't know the size so 2150 // we can't use the dereferenceable attribute, but in addrspace(0) 2151 // we know that it must be nonnull. 2152 if (ArrTy->getSizeModifier() == VariableArrayType::Static && 2153 !getContext().getTargetAddressSpace(ArrTy->getElementType())) 2154 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2155 AI->getArgNo() + 1, 2156 llvm::Attribute::NonNull)); 2157 } 2158 2159 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); 2160 if (!AVAttr) 2161 if (const auto *TOTy = dyn_cast<TypedefType>(OTy)) 2162 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); 2163 if (AVAttr) { 2164 llvm::Value *AlignmentValue = 2165 EmitScalarExpr(AVAttr->getAlignment()); 2166 llvm::ConstantInt *AlignmentCI = 2167 cast<llvm::ConstantInt>(AlignmentValue); 2168 unsigned Alignment = 2169 std::min((unsigned) AlignmentCI->getZExtValue(), 2170 +llvm::Value::MaximumAlignment); 2171 2172 llvm::AttrBuilder Attrs; 2173 Attrs.addAlignmentAttr(Alignment); 2174 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2175 AI->getArgNo() + 1, Attrs)); 2176 } 2177 } 2178 2179 if (Arg->getType().isRestrictQualified()) 2180 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 2181 AI->getArgNo() + 1, 2182 llvm::Attribute::NoAlias)); 2183 2184 // Ensure the argument is the correct type. 2185 if (V->getType() != ArgI.getCoerceToType()) 2186 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 2187 2188 if (isPromoted) 2189 V = emitArgumentDemotion(*this, Arg, V); 2190 2191 if (const CXXMethodDecl *MD = 2192 dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) { 2193 if (MD->isVirtual() && Arg == CXXABIThisDecl) 2194 V = CGM.getCXXABI(). 2195 adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V); 2196 } 2197 2198 // Because of merging of function types from multiple decls it is 2199 // possible for the type of an argument to not match the corresponding 2200 // type in the function type. Since we are codegening the callee 2201 // in here, add a cast to the argument type. 2202 llvm::Type *LTy = ConvertType(Arg->getType()); 2203 if (V->getType() != LTy) 2204 V = Builder.CreateBitCast(V, LTy); 2205 2206 ArgVals.push_back(ParamValue::forDirect(V)); 2207 break; 2208 } 2209 2210 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), 2211 Arg->getName()); 2212 2213 // Pointer to store into. 2214 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI); 2215 2216 // Fast-isel and the optimizer generally like scalar values better than 2217 // FCAs, so we flatten them if this is safe to do for this argument. 
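    // For example, an argument coerced to { i32, i32 } arrives as two i32
    // IR arguments, which are stored element-by-element into the alloca
    // below.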
2218 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 2219 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && 2220 STy->getNumElements() > 1) { 2221 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy); 2222 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 2223 llvm::Type *DstTy = Ptr.getElementType(); 2224 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 2225 2226 Address AddrToStoreInto = Address::invalid(); 2227 if (SrcSize <= DstSize) { 2228 AddrToStoreInto = 2229 Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy)); 2230 } else { 2231 AddrToStoreInto = 2232 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce"); 2233 } 2234 2235 assert(STy->getNumElements() == NumIRArgs); 2236 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2237 auto AI = FnArgs[FirstIRArg + i]; 2238 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 2239 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i)); 2240 Address EltPtr = 2241 Builder.CreateStructGEP(AddrToStoreInto, i, Offset); 2242 Builder.CreateStore(AI, EltPtr); 2243 } 2244 2245 if (SrcSize > DstSize) { 2246 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize); 2247 } 2248 2249 } else { 2250 // Simple case, just do a coerced store of the argument into the alloca. 2251 assert(NumIRArgs == 1); 2252 auto AI = FnArgs[FirstIRArg]; 2253 AI->setName(Arg->getName() + ".coerce"); 2254 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this); 2255 } 2256 2257 // Match to what EmitParmDecl is expecting for this type. 2258 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 2259 llvm::Value *V = 2260 EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart()); 2261 if (isPromoted) 2262 V = emitArgumentDemotion(*this, Arg, V); 2263 ArgVals.push_back(ParamValue::forDirect(V)); 2264 } else { 2265 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2266 } 2267 break; 2268 } 2269 2270 case ABIArgInfo::CoerceAndExpand: { 2271 // Reconstruct into a temporary. 2272 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 2273 ArgVals.push_back(ParamValue::forIndirect(alloca)); 2274 2275 auto coercionType = ArgI.getCoerceAndExpandType(); 2276 alloca = Builder.CreateElementBitCast(alloca, coercionType); 2277 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 2278 2279 unsigned argIndex = FirstIRArg; 2280 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 2281 llvm::Type *eltType = coercionType->getElementType(i); 2282 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) 2283 continue; 2284 2285 auto eltAddr = Builder.CreateStructGEP(alloca, i, layout); 2286 auto elt = FnArgs[argIndex++]; 2287 Builder.CreateStore(elt, eltAddr); 2288 } 2289 assert(argIndex == FirstIRArg + NumIRArgs); 2290 break; 2291 } 2292 2293 case ABIArgInfo::Expand: { 2294 // If this structure was expanded into multiple arguments then 2295 // we need to create a temporary and reconstruct it from the 2296 // arguments. 2297 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 2298 LValue LV = MakeAddrLValue(Alloca, Ty); 2299 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2300 2301 auto FnArgIter = FnArgs.begin() + FirstIRArg; 2302 ExpandTypeFromArgs(Ty, LV, FnArgIter); 2303 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs); 2304 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { 2305 auto AI = FnArgs[FirstIRArg + i]; 2306 AI->setName(Arg->getName() + "." 
 + Twine(i));
2307       }
2308       break;
2309     }
2310
2311     case ABIArgInfo::Ignore:
2312       assert(NumIRArgs == 0);
2313       // Initialize the local variable appropriately.
2314       if (!hasScalarEvaluationKind(Ty)) {
2315         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2316       } else {
2317         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2318         ArgVals.push_back(ParamValue::forDirect(U));
2319       }
2320       break;
2321     }
2322   }
2323
2324   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2325     for (int I = Args.size() - 1; I >= 0; --I)
2326       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2327   } else {
2328     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2329       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2330   }
2331 }
2332
2333 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2334   while (insn->use_empty()) {
2335     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2336     if (!bitcast) return;
2337
2338     // This is "safe" because we would have used a ConstantExpr otherwise.
2339     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2340     bitcast->eraseFromParent();
2341   }
2342 }
2343
2344 /// Try to emit a fused autorelease of a return result.
2345 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2346                                                     llvm::Value *result) {
2347   // We must be immediately following the cast.
2348   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2349   if (BB->empty()) return nullptr;
2350   if (&BB->back() != result) return nullptr;
2351
2352   llvm::Type *resultType = result->getType();
2353
2354   // result is in a BasicBlock and is therefore an Instruction.
2355   llvm::Instruction *generator = cast<llvm::Instruction>(result);
2356
2357   SmallVector<llvm::Instruction*,4> insnsToKill;
2358
2359   // Look for:
2360   //  %generator = bitcast %type1* %generator2 to %type2*
2361   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2362     // We would have emitted this as a constant if the operand weren't
2363     // an Instruction.
2364     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2365
2366     // Require the generator to be immediately followed by the cast.
2367     if (generator->getNextNode() != bitcast)
2368       return nullptr;
2369
2370     insnsToKill.push_back(bitcast);
2371   }
2372
2373   // Look for:
2374   //   %generator = call i8* @objc_retain(i8* %originalResult)
2375   // or
2376   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2377   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2378   if (!call) return nullptr;
2379
2380   bool doRetainAutorelease;
2381
2382   if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2383     doRetainAutorelease = true;
2384   } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2385                                           .objc_retainAutoreleasedReturnValue) {
2386     doRetainAutorelease = false;
2387
2388     // If we emitted an assembly marker for this call (and the
2389     // ARCEntrypoints field should have been set if so), go looking
2390     // for that call.  If we can't find it, we can't do this
2391     // optimization.  But it should always be the immediately previous
2392     // instruction, unless we needed bitcasts around the call.
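    // (On ARM, for example, the marker is an inline-asm "mov r7, r7" that
    // sits between the original call and the
    // objc_retainAutoreleasedReturnValue call so the runtime can
    // pattern-match it.)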
2393 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { 2394 llvm::Instruction *prev = call->getPrevNode(); 2395 assert(prev); 2396 if (isa<llvm::BitCastInst>(prev)) { 2397 prev = prev->getPrevNode(); 2398 assert(prev); 2399 } 2400 assert(isa<llvm::CallInst>(prev)); 2401 assert(cast<llvm::CallInst>(prev)->getCalledValue() == 2402 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); 2403 insnsToKill.push_back(prev); 2404 } 2405 } else { 2406 return nullptr; 2407 } 2408 2409 result = call->getArgOperand(0); 2410 insnsToKill.push_back(call); 2411 2412 // Keep killing bitcasts, for sanity. Note that we no longer care 2413 // about precise ordering as long as there's exactly one use. 2414 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 2415 if (!bitcast->hasOneUse()) break; 2416 insnsToKill.push_back(bitcast); 2417 result = bitcast->getOperand(0); 2418 } 2419 2420 // Delete all the unnecessary instructions, from latest to earliest. 2421 for (SmallVectorImpl<llvm::Instruction*>::iterator 2422 i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i) 2423 (*i)->eraseFromParent(); 2424 2425 // Do the fused retain/autorelease if we were asked to. 2426 if (doRetainAutorelease) 2427 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 2428 2429 // Cast back to the result type. 2430 return CGF.Builder.CreateBitCast(result, resultType); 2431 } 2432 2433 /// If this is a +1 of the value of an immutable 'self', remove it. 2434 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 2435 llvm::Value *result) { 2436 // This is only applicable to a method with an immutable 'self'. 2437 const ObjCMethodDecl *method = 2438 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 2439 if (!method) return nullptr; 2440 const VarDecl *self = method->getSelfDecl(); 2441 if (!self->getType().isConstQualified()) return nullptr; 2442 2443 // Look for a retain call. 2444 llvm::CallInst *retainCall = 2445 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 2446 if (!retainCall || 2447 retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain) 2448 return nullptr; 2449 2450 // Look for an ordinary load of 'self'. 2451 llvm::Value *retainedValue = retainCall->getArgOperand(0); 2452 llvm::LoadInst *load = 2453 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 2454 if (!load || load->isAtomic() || load->isVolatile() || 2455 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) 2456 return nullptr; 2457 2458 // Okay! Burn it all down. This relies for correctness on the 2459 // assumption that the retain is emitted as part of the return and 2460 // that thereafter everything is used "linearly". 2461 llvm::Type *resultType = result->getType(); 2462 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 2463 assert(retainCall->use_empty()); 2464 retainCall->eraseFromParent(); 2465 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 2466 2467 return CGF.Builder.CreateBitCast(load, resultType); 2468 } 2469 2470 /// Emit an ARC autorelease of the result of a function. 2471 /// 2472 /// \return the value to actually return from the function 2473 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 2474 llvm::Value *result) { 2475 // If we're returning 'self', kill the initial retain. 
This is a
2476 // heuristic attempt to "encourage correctness" in the really unfortunate
2477 // case where we have a return of self during a dealloc and we desperately
2478 // need to avoid the possible autorelease.
2479   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2480     return self;
2481
2482   // At -O0, try to emit a fused retain/autorelease.
2483   if (CGF.shouldUseFusedARCCalls())
2484     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2485       return fused;
2486
2487   return CGF.EmitARCAutoreleaseReturnValue(result);
2488 }
2489
2490 /// Heuristically search for a dominating store to the return-value slot.
2491 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2492   // Check whether a User is a store whose pointer operand is the ReturnValue.
2493   // We are looking for stores to the ReturnValue, not for stores of the
2494   // ReturnValue to some other location.
2495   auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2496     auto *SI = dyn_cast<llvm::StoreInst>(U);
2497     if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2498       return nullptr;
2499     // These aren't actually possible for non-coerced returns, and we
2500     // only care about non-coerced returns on this code path.
2501     assert(!SI->isAtomic() && !SI->isVolatile());
2502     return SI;
2503   };
2504   // If there are multiple uses of the return-value slot, just check
2505   // for something immediately preceding the IP.  Sometimes this can
2506   // happen with how we generate implicit-returns; it can also happen
2507   // with noreturn cleanups.
2508   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2509     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2510     if (IP->empty()) return nullptr;
2511     llvm::Instruction *I = &IP->back();
2512
2513     // Skip lifetime markers
2514     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2515                                             IE = IP->rend();
2516          II != IE; ++II) {
2517       if (llvm::IntrinsicInst *Intrinsic =
2518               dyn_cast<llvm::IntrinsicInst>(&*II)) {
2519         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2520           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2521           ++II;
2522           if (II == IE)
2523             break;
2524           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2525             continue;
2526         }
2527       }
2528       I = &*II;
2529       break;
2530     }
2531
2532     return GetStoreIfValid(I);
2533   }
2534
2535   llvm::StoreInst *store =
2536       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2537   if (!store) return nullptr;
2538
2539   // Now do a quick-and-dirty dominance check: just walk up the
2540   // single-predecessors chain from the current insertion point.
2541   llvm::BasicBlock *StoreBB = store->getParent();
2542   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2543   while (IP != StoreBB) {
2544     if (!(IP = IP->getSinglePredecessor()))
2545       return nullptr;
2546   }
2547
2548   // Okay, the store's basic block dominates the insertion point; we
2549   // can do our thing.
2550   return store;
2551 }
2552
2553 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2554                                          bool EmitRetDbgLoc,
2555                                          SourceLocation EndLoc) {
2556   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2557     // Naked functions don't have epilogues.
2558     Builder.CreateUnreachable();
2559     return;
2560   }
2561
2562   // Functions with no result always return void.
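  // (ReturnValue is left invalid when there is nothing to return, e.g. for
  // a function declared to return void, so a bare 'ret void' suffices.)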
2563   if (!ReturnValue.isValid()) {
2564     Builder.CreateRetVoid();
2565     return;
2566   }
2567
2568   llvm::DebugLoc RetDbgLoc;
2569   llvm::Value *RV = nullptr;
2570   QualType RetTy = FI.getReturnType();
2571   const ABIArgInfo &RetAI = FI.getReturnInfo();
2572
2573   switch (RetAI.getKind()) {
2574   case ABIArgInfo::InAlloca:
2575     // Aggregates get evaluated directly into the destination.  Sometimes we
2576     // need to return the sret value in a register, though.
2577     assert(hasAggregateEvaluationKind(RetTy));
2578     if (RetAI.getInAllocaSRet()) {
2579       llvm::Function::arg_iterator EI = CurFn->arg_end();
2580       --EI;
2581       llvm::Value *ArgStruct = &*EI;
2582       llvm::Value *SRet = Builder.CreateStructGEP(
2583           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2584       RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2585     }
2586     break;
2587
2588   case ABIArgInfo::Indirect: {
2589     auto AI = CurFn->arg_begin();
2590     if (RetAI.isSRetAfterThis())
2591       ++AI;
2592     switch (getEvaluationKind(RetTy)) {
2593     case TEK_Complex: {
2594       ComplexPairTy RT =
2595           EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2596       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2597                          /*isInit*/ true);
2598       break;
2599     }
2600     case TEK_Aggregate:
2601       // Do nothing; aggregates get evaluated directly into the destination.
2602       break;
2603     case TEK_Scalar:
2604       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2605                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
2606                         /*isInit*/ true);
2607       break;
2608     }
2609     break;
2610   }
2611
2612   case ABIArgInfo::Extend:
2613   case ABIArgInfo::Direct:
2614     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2615         RetAI.getDirectOffset() == 0) {
2616       // The internal return value temp will always have
2617       // pointer-to-return-type type, so just do a load.
2618
2619       // If there is a dominating store to ReturnValue, we can elide
2620       // the load, zap the store, and usually zap the alloca.
2621       if (llvm::StoreInst *SI =
2622               findDominatingStoreToReturnValue(*this)) {
2623         // Reuse the debug location from the store unless there is
2624         // cleanup code to be emitted between the store and return
2625         // instruction.
2626         if (EmitRetDbgLoc && !AutoreleaseResult)
2627           RetDbgLoc = SI->getDebugLoc();
2628         // Get the stored value and nuke the now-dead store.
2629         RV = SI->getValueOperand();
2630         SI->eraseFromParent();
2631
2632         // If that was the only use of the return value, nuke it as well now.
2633         auto returnValueInst = ReturnValue.getPointer();
2634         if (returnValueInst->use_empty()) {
2635           if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2636             alloca->eraseFromParent();
2637             ReturnValue = Address::invalid();
2638           }
2639         }
2640
2641       // Otherwise, we have to do a simple load.
2642       } else {
2643         RV = Builder.CreateLoad(ReturnValue);
2644       }
2645     } else {
2646       // If the value is offset in memory, apply the offset now.
2647       Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2648
2649       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2650     }
2651
2652     // In ARC, end functions that return a retainable type with a call
2653     // to objc_autoreleaseReturnValue.
2654     if (AutoreleaseResult) {
2655 #ifndef NDEBUG
2656       // Type::isObjCRetainableType has to be called on a QualType that hasn't
2657       // been stripped of the typedefs, so we cannot use RetTy here. Get the
2658       // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2659       // CurCodeDecl or BlockInfo.
2660 QualType RT; 2661 2662 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl)) 2663 RT = FD->getReturnType(); 2664 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl)) 2665 RT = MD->getReturnType(); 2666 else if (isa<BlockDecl>(CurCodeDecl)) 2667 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType(); 2668 else 2669 llvm_unreachable("Unexpected function/method type"); 2670 2671 assert(getLangOpts().ObjCAutoRefCount && 2672 !FI.isReturnsRetained() && 2673 RT->isObjCRetainableType()); 2674 #endif 2675 RV = emitAutoreleaseOfResult(*this, RV); 2676 } 2677 2678 break; 2679 2680 case ABIArgInfo::Ignore: 2681 break; 2682 2683 case ABIArgInfo::CoerceAndExpand: { 2684 auto coercionType = RetAI.getCoerceAndExpandType(); 2685 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 2686 2687 // Load all of the coerced elements out into results. 2688 llvm::SmallVector<llvm::Value*, 4> results; 2689 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType); 2690 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 2691 auto coercedEltType = coercionType->getElementType(i); 2692 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType)) 2693 continue; 2694 2695 auto eltAddr = Builder.CreateStructGEP(addr, i, layout); 2696 auto elt = Builder.CreateLoad(eltAddr); 2697 results.push_back(elt); 2698 } 2699 2700 // If we have one result, it's the single direct result type. 2701 if (results.size() == 1) { 2702 RV = results[0]; 2703 2704 // Otherwise, we need to make a first-class aggregate. 2705 } else { 2706 // Construct a return type that lacks padding elements. 2707 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType(); 2708 2709 RV = llvm::UndefValue::get(returnType); 2710 for (unsigned i = 0, e = results.size(); i != e; ++i) { 2711 RV = Builder.CreateInsertValue(RV, results[i], i); 2712 } 2713 } 2714 break; 2715 } 2716 2717 case ABIArgInfo::Expand: 2718 llvm_unreachable("Invalid ABI kind for return argument"); 2719 } 2720 2721 llvm::Instruction *Ret; 2722 if (RV) { 2723 if (CurCodeDecl && SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) { 2724 if (auto RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>()) { 2725 SanitizerScope SanScope(this); 2726 llvm::Value *Cond = Builder.CreateICmpNE( 2727 RV, llvm::Constant::getNullValue(RV->getType())); 2728 llvm::Constant *StaticData[] = { 2729 EmitCheckSourceLocation(EndLoc), 2730 EmitCheckSourceLocation(RetNNAttr->getLocation()), 2731 }; 2732 EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute), 2733 "nonnull_return", StaticData, None); 2734 } 2735 } 2736 Ret = Builder.CreateRet(RV); 2737 } else { 2738 Ret = Builder.CreateRetVoid(); 2739 } 2740 2741 if (RetDbgLoc) 2742 Ret->setDebugLoc(std::move(RetDbgLoc)); 2743 } 2744 2745 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 2746 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 2747 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 2748 } 2749 2750 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, 2751 QualType Ty) { 2752 // FIXME: Generate IR in one pass, rather than going back and fixing up these 2753 // placeholders. 2754 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 2755 llvm::Value *Placeholder = 2756 llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo()); 2757 Placeholder = CGF.Builder.CreateDefaultAlignedLoad(Placeholder); 2758 2759 // FIXME: When we generate this IR in one pass, we shouldn't need 2760 // this win32-specific alignment hack. 
2761 CharUnits Align = CharUnits::fromQuantity(4); 2762 2763 return AggValueSlot::forAddr(Address(Placeholder, Align), 2764 Ty.getQualifiers(), 2765 AggValueSlot::IsNotDestructed, 2766 AggValueSlot::DoesNotNeedGCBarriers, 2767 AggValueSlot::IsNotAliased); 2768 } 2769 2770 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 2771 const VarDecl *param, 2772 SourceLocation loc) { 2773 // StartFunction converted the ABI-lowered parameter(s) into a 2774 // local alloca. We need to turn that into an r-value suitable 2775 // for EmitCall. 2776 Address local = GetAddrOfLocalVar(param); 2777 2778 QualType type = param->getType(); 2779 2780 // For the most part, we just need to load the alloca, except: 2781 // 1) aggregate r-values are actually pointers to temporaries, and 2782 // 2) references to non-scalars are pointers directly to the aggregate. 2783 // I don't know why references to scalars are different here. 2784 if (const ReferenceType *ref = type->getAs<ReferenceType>()) { 2785 if (!hasScalarEvaluationKind(ref->getPointeeType())) 2786 return args.add(RValue::getAggregate(local), type); 2787 2788 // Locals which are references to scalars are represented 2789 // with allocas holding the pointer. 2790 return args.add(RValue::get(Builder.CreateLoad(local)), type); 2791 } 2792 2793 assert(!isInAllocaArgument(CGM.getCXXABI(), type) && 2794 "cannot emit delegate call arguments for inalloca arguments!"); 2795 2796 args.add(convertTempToRValue(local, type, loc), type); 2797 } 2798 2799 static bool isProvablyNull(llvm::Value *addr) { 2800 return isa<llvm::ConstantPointerNull>(addr); 2801 } 2802 2803 static bool isProvablyNonNull(llvm::Value *addr) { 2804 return isa<llvm::AllocaInst>(addr); 2805 } 2806 2807 /// Emit the actual writing-back of a writeback. 2808 static void emitWriteback(CodeGenFunction &CGF, 2809 const CallArgList::Writeback &writeback) { 2810 const LValue &srcLV = writeback.Source; 2811 Address srcAddr = srcLV.getAddress(); 2812 assert(!isProvablyNull(srcAddr.getPointer()) && 2813 "shouldn't have writeback for provably null argument"); 2814 2815 llvm::BasicBlock *contBB = nullptr; 2816 2817 // If the argument wasn't provably non-null, we need to null check 2818 // before doing the store. 2819 bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer()); 2820 if (!provablyNonNull) { 2821 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 2822 contBB = CGF.createBasicBlock("icr.done"); 2823 2824 llvm::Value *isNull = 2825 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 2826 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 2827 CGF.EmitBlock(writebackBB); 2828 } 2829 2830 // Load the value to writeback. 2831 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 2832 2833 // Cast it back, in case we're writing an id to a Foo* or something. 2834 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(), 2835 "icr.writeback-cast"); 2836 2837 // Perform the writeback. 2838 2839 // If we have a "to use" value, it's something we need to emit a use 2840 // of. This has to be carefully threaded in: if it's done after the 2841 // release it's potentially undefined behavior (and the optimizer 2842 // will ignore it), and if it happens before the retain then the 2843 // optimizer could move the release there. 2844 if (writeback.ToUse) { 2845 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 2846 2847 // Retain the new value. No need to block-copy here: the block's 2848 // being passed up the stack. 
2849 value = CGF.EmitARCRetainNonBlock(value); 2850 2851 // Emit the intrinsic use here. 2852 CGF.EmitARCIntrinsicUse(writeback.ToUse); 2853 2854 // Load the old value (primitively). 2855 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 2856 2857 // Put the new value in place (primitively). 2858 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 2859 2860 // Release the old value. 2861 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 2862 2863 // Otherwise, we can just do a normal lvalue store. 2864 } else { 2865 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 2866 } 2867 2868 // Jump to the continuation block. 2869 if (!provablyNonNull) 2870 CGF.EmitBlock(contBB); 2871 } 2872 2873 static void emitWritebacks(CodeGenFunction &CGF, 2874 const CallArgList &args) { 2875 for (const auto &I : args.writebacks()) 2876 emitWriteback(CGF, I); 2877 } 2878 2879 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, 2880 const CallArgList &CallArgs) { 2881 assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()); 2882 ArrayRef<CallArgList::CallArgCleanup> Cleanups = 2883 CallArgs.getCleanupsToDeactivate(); 2884 // Iterate in reverse to increase the likelihood of popping the cleanup. 2885 for (const auto &I : llvm::reverse(Cleanups)) { 2886 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP); 2887 I.IsActiveIP->eraseFromParent(); 2888 } 2889 } 2890 2891 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { 2892 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) 2893 if (uop->getOpcode() == UO_AddrOf) 2894 return uop->getSubExpr(); 2895 return nullptr; 2896 } 2897 2898 /// Emit an argument that's being passed call-by-writeback. That is, 2899 /// we are passing the address of an __autoreleased temporary; it 2900 /// might be copy-initialized with the current value of the given 2901 /// address, but it will definitely be copied out of after the call. 2902 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, 2903 const ObjCIndirectCopyRestoreExpr *CRE) { 2904 LValue srcLV; 2905 2906 // Make an optimistic effort to emit the address as an l-value. 2907 // This can fail if the argument expression is more complicated. 2908 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { 2909 srcLV = CGF.EmitLValue(lvExpr); 2910 2911 // Otherwise, just emit it as a scalar. 2912 } else { 2913 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); 2914 2915 QualType srcAddrType = 2916 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); 2917 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); 2918 } 2919 Address srcAddr = srcLV.getAddress(); 2920 2921 // The dest and src types don't necessarily match in LLVM terms 2922 // because of the crazy ObjC compatibility rules. 2923 2924 llvm::PointerType *destType = 2925 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); 2926 2927 // If the address is a constant null, just pass the appropriate null. 2928 if (isProvablyNull(srcAddr.getPointer())) { 2929 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), 2930 CRE->getType()); 2931 return; 2932 } 2933 2934 // Create the temporary. 
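  // For a call like 'f(&x)' where f takes an 'id __autoreleasing *'
  // parameter, "icr.temp" below is what is actually passed; x is copied into
  // it beforehand (when copy-initialization is requested) and written back
  // out of it after the call.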
2935 Address temp = CGF.CreateTempAlloca(destType->getElementType(), 2936 CGF.getPointerAlign(), 2937 "icr.temp"); 2938 // Loading an l-value can introduce a cleanup if the l-value is __weak, 2939 // and that cleanup will be conditional if we can't prove that the l-value 2940 // isn't null, so we need to register a dominating point so that the cleanups 2941 // system will make valid IR. 2942 CodeGenFunction::ConditionalEvaluation condEval(CGF); 2943 2944 // Zero-initialize it if we're not doing a copy-initialization. 2945 bool shouldCopy = CRE->shouldCopy(); 2946 if (!shouldCopy) { 2947 llvm::Value *null = 2948 llvm::ConstantPointerNull::get( 2949 cast<llvm::PointerType>(destType->getElementType())); 2950 CGF.Builder.CreateStore(null, temp); 2951 } 2952 2953 llvm::BasicBlock *contBB = nullptr; 2954 llvm::BasicBlock *originBB = nullptr; 2955 2956 // If the address is *not* known to be non-null, we need to switch. 2957 llvm::Value *finalArgument; 2958 2959 bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer()); 2960 if (provablyNonNull) { 2961 finalArgument = temp.getPointer(); 2962 } else { 2963 llvm::Value *isNull = 2964 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 2965 2966 finalArgument = CGF.Builder.CreateSelect(isNull, 2967 llvm::ConstantPointerNull::get(destType), 2968 temp.getPointer(), "icr.argument"); 2969 2970 // If we need to copy, then the load has to be conditional, which 2971 // means we need control flow. 2972 if (shouldCopy) { 2973 originBB = CGF.Builder.GetInsertBlock(); 2974 contBB = CGF.createBasicBlock("icr.cont"); 2975 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 2976 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 2977 CGF.EmitBlock(copyBB); 2978 condEval.begin(CGF); 2979 } 2980 } 2981 2982 llvm::Value *valueToUse = nullptr; 2983 2984 // Perform a copy if necessary. 2985 if (shouldCopy) { 2986 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 2987 assert(srcRV.isScalar()); 2988 2989 llvm::Value *src = srcRV.getScalarVal(); 2990 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 2991 "icr.cast"); 2992 2993 // Use an ordinary store, not a store-to-lvalue. 2994 CGF.Builder.CreateStore(src, temp); 2995 2996 // If optimization is enabled, and the value was held in a 2997 // __strong variable, we need to tell the optimizer that this 2998 // value has to stay alive until we're doing the store back. 2999 // This is because the temporary is effectively unretained, 3000 // and so otherwise we can violate the high-level semantics. 3001 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 3002 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 3003 valueToUse = src; 3004 } 3005 } 3006 3007 // Finish the control flow if we needed it. 3008 if (shouldCopy && !provablyNonNull) { 3009 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 3010 CGF.EmitBlock(contBB); 3011 3012 // Make a phi for the value to intrinsically use. 
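    // (The phi merges the copied value from copyBB with undef from originBB;
    // the undef arm corresponds to the null-pointer path, where there is
    // nothing to keep alive.)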
3013 if (valueToUse) { 3014 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 3015 "icr.to-use"); 3016 phiToUse->addIncoming(valueToUse, copyBB); 3017 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 3018 originBB); 3019 valueToUse = phiToUse; 3020 } 3021 3022 condEval.end(CGF); 3023 } 3024 3025 args.addWriteback(srcLV, temp, valueToUse); 3026 args.add(RValue::get(finalArgument), CRE->getType()); 3027 } 3028 3029 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 3030 assert(!StackBase && !StackCleanup.isValid()); 3031 3032 // Save the stack. 3033 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 3034 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); 3035 } 3036 3037 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 3038 if (StackBase) { 3039 // Restore the stack after the call. 3040 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 3041 CGF.Builder.CreateCall(F, StackBase); 3042 } 3043 } 3044 3045 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, 3046 SourceLocation ArgLoc, 3047 const FunctionDecl *FD, 3048 unsigned ParmNum) { 3049 if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD) 3050 return; 3051 auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr; 3052 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; 3053 auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo); 3054 if (!NNAttr) 3055 return; 3056 SanitizerScope SanScope(this); 3057 assert(RV.isScalar()); 3058 llvm::Value *V = RV.getScalarVal(); 3059 llvm::Value *Cond = 3060 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType())); 3061 llvm::Constant *StaticData[] = { 3062 EmitCheckSourceLocation(ArgLoc), 3063 EmitCheckSourceLocation(NNAttr->getLocation()), 3064 llvm::ConstantInt::get(Int32Ty, ArgNo + 1), 3065 }; 3066 EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute), 3067 "nonnull_arg", StaticData, None); 3068 } 3069 3070 void CodeGenFunction::EmitCallArgs( 3071 CallArgList &Args, ArrayRef<QualType> ArgTypes, 3072 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, 3073 const FunctionDecl *CalleeDecl, unsigned ParamsToSkip) { 3074 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); 3075 3076 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) { 3077 if (CalleeDecl == nullptr || I >= CalleeDecl->getNumParams()) 3078 return; 3079 auto *PS = CalleeDecl->getParamDecl(I)->getAttr<PassObjectSizeAttr>(); 3080 if (PS == nullptr) 3081 return; 3082 3083 const auto &Context = getContext(); 3084 auto SizeTy = Context.getSizeType(); 3085 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); 3086 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T); 3087 Args.add(RValue::get(V), SizeTy); 3088 }; 3089 3090 // We *have* to evaluate arguments from right to left in the MS C++ ABI, 3091 // because arguments are destroyed left to right in the callee. 3092 if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 3093 // Insert a stack save if we're going to need any inalloca args. 
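    // (A minimal sketch of the IR the stack save/restore pair brackets,
    //  assuming at least one inalloca argument on x86; allocateArgumentMemory
    //  above emits the save and freeArgumentMemory emits the restore:
    //      %inalloca.save = call i8* @llvm.stacksave()
    //      %argmem = alloca inalloca <{ ... }>
    //      ; ...evaluate arguments into %argmem, make the call...
    //      call void @llvm.stackrestore(i8* %inalloca.save)
    //  )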
3094 bool HasInAllocaArgs = false; 3095 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end(); 3096 I != E && !HasInAllocaArgs; ++I) 3097 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I); 3098 if (HasInAllocaArgs) { 3099 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 3100 Args.allocateArgumentMemory(*this); 3101 } 3102 3103 // Evaluate each argument. 3104 size_t CallArgsStart = Args.size(); 3105 for (int I = ArgTypes.size() - 1; I >= 0; --I) { 3106 CallExpr::const_arg_iterator Arg = ArgRange.begin() + I; 3107 EmitCallArg(Args, *Arg, ArgTypes[I]); 3108 EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(), 3109 CalleeDecl, ParamsToSkip + I); 3110 MaybeEmitImplicitObjectSize(I, *Arg); 3111 } 3112 3113 // Un-reverse the arguments we just evaluated so they match up with the LLVM 3114 // IR function. 3115 std::reverse(Args.begin() + CallArgsStart, Args.end()); 3116 return; 3117 } 3118 3119 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { 3120 CallExpr::const_arg_iterator Arg = ArgRange.begin() + I; 3121 assert(Arg != ArgRange.end()); 3122 EmitCallArg(Args, *Arg, ArgTypes[I]); 3123 EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(), 3124 CalleeDecl, ParamsToSkip + I); 3125 MaybeEmitImplicitObjectSize(I, *Arg); 3126 } 3127 } 3128 3129 namespace { 3130 3131 struct DestroyUnpassedArg final : EHScopeStack::Cleanup { 3132 DestroyUnpassedArg(Address Addr, QualType Ty) 3133 : Addr(Addr), Ty(Ty) {} 3134 3135 Address Addr; 3136 QualType Ty; 3137 3138 void Emit(CodeGenFunction &CGF, Flags flags) override { 3139 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 3140 assert(!Dtor->isTrivial()); 3141 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 3142 /*Delegating=*/false, Addr); 3143 } 3144 }; 3145 3146 struct DisableDebugLocationUpdates { 3147 CodeGenFunction &CGF; 3148 bool disabledDebugInfo; 3149 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { 3150 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) 3151 CGF.disableDebugInfo(); 3152 } 3153 ~DisableDebugLocationUpdates() { 3154 if (disabledDebugInfo) 3155 CGF.enableDebugInfo(); 3156 } 3157 }; 3158 3159 } // end anonymous namespace 3160 3161 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 3162 QualType type) { 3163 DisableDebugLocationUpdates Dis(*this, E); 3164 if (const ObjCIndirectCopyRestoreExpr *CRE 3165 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 3166 assert(getLangOpts().ObjCAutoRefCount); 3167 assert(getContext().hasSameType(E->getType(), type)); 3168 return emitWritebackArg(*this, args, CRE); 3169 } 3170 3171 assert(type->isReferenceType() == E->isGLValue() && 3172 "reference binding to unmaterialized r-value!"); 3173 3174 if (E->isGLValue()) { 3175 assert(E->getObjectKind() == OK_Ordinary); 3176 return args.add(EmitReferenceBindingToExpr(E), type); 3177 } 3178 3179 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 3180 3181 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 3182 // However, we still have to push an EH-only cleanup in case we unwind before 3183 // we make it to the call. 3184 if (HasAggregateEvalKind && 3185 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 3186 // If we're using inalloca, use the argument memory. Otherwise, use a 3187 // temporary. 
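    // (Illustrative example with hypothetical types: given
    //      struct S { S(const S &); ~S(); };
    //      void f(S);
    //  the MS C++ ABI destroys the argument inside f, so the caller only
    //  needs the EH-only cleanup pushed below for the window between
    //  constructing the argument and reaching the call instruction.)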
3188     AggValueSlot Slot;
3189     if (args.isUsingInAlloca())
3190       Slot = createPlaceholderSlot(*this, type);
3191     else
3192       Slot = CreateAggTemp(type, "agg.tmp");
3193 
3194     const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3195     bool DestroyedInCallee =
3196         RD && RD->hasNonTrivialDestructor() &&
3197         CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
3198     if (DestroyedInCallee)
3199       Slot.setExternallyDestructed();
3200 
3201     EmitAggExpr(E, Slot);
3202     RValue RV = Slot.asRValue();
3203     args.add(RV, type);
3204 
3205     if (DestroyedInCallee) {
3206       // Push an EH-only cleanup so the argument is destroyed if we unwind
3207       // before reaching the call.  The unreachable created below serves as
3208       // a marker of the first instruction where the cleanup is active.
3209       pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3210                                               type);
3211       // This unreachable is a temporary marker, erased when the cleanup is deactivated.
3212       llvm::Instruction *IsActive = Builder.CreateUnreachable();
3213       args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3214     }
3215     return;
3216   }
3217 
3218   if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3219       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3220     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3221     assert(L.isSimple());
3222     if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
3223       args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
3224     } else {
3225       // We can't represent a misaligned lvalue in the CallArgList, so copy
3226       // to an aligned temporary now.
3227       Address tmp = CreateMemTemp(type);
3228       EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
3229       args.add(RValue::getAggregate(tmp), type);
3230     }
3231     return;
3232   }
3233 
3234   args.add(EmitAnyExprToTemp(E), type);
3235 }
3236 
3237 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3238   // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3239   // implicitly widens null pointer constants that are arguments to varargs
3240   // functions to pointer-sized ints.
3241   if (!getTarget().getTriple().isOSWindows())
3242     return Arg->getType();
3243 
3244   if (Arg->getType()->isIntegerType() &&
3245       getContext().getTypeSize(Arg->getType()) <
3246           getContext().getTargetInfo().getPointerWidth(0) &&
3247       Arg->isNullPointerConstant(getContext(),
3248                                  Expr::NPC_ValueDependentIsNotNull)) {
3249     return getContext().getIntPtrType();
3250   }
3251 
3252   return Arg->getType();
3253 }
3254 
3255 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3256 // optimizer it can aggressively ignore unwind edges.
3257 void
3258 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3259   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3260       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3261     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3262                       CGM.getNoObjCARCExceptionsMetadata());
3263 }
3264 
3265 /// Emits a call to the given no-arguments nounwind runtime function.
3266 llvm::CallInst *
3267 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3268                                          const llvm::Twine &name) {
3269   return EmitNounwindRuntimeCall(callee, None, name);
3270 }
3271 
3272 /// Emits a call to the given nounwind runtime function.
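/// (The result is always a plain 'call' marked nounwind, so no invoke or
/// landing pad is ever needed for it.)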
3273 llvm::CallInst * 3274 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 3275 ArrayRef<llvm::Value*> args, 3276 const llvm::Twine &name) { 3277 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 3278 call->setDoesNotThrow(); 3279 return call; 3280 } 3281 3282 /// Emits a simple call (never an invoke) to the given no-arguments 3283 /// runtime function. 3284 llvm::CallInst * 3285 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 3286 const llvm::Twine &name) { 3287 return EmitRuntimeCall(callee, None, name); 3288 } 3289 3290 // Calls which may throw must have operand bundles indicating which funclet 3291 // they are nested within. 3292 static void 3293 getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad, 3294 SmallVectorImpl<llvm::OperandBundleDef> &BundleList) { 3295 // There is no need for a funclet operand bundle if we aren't inside a 3296 // funclet. 3297 if (!CurrentFuncletPad) 3298 return; 3299 3300 // Skip intrinsics which cannot throw. 3301 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts()); 3302 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) 3303 return; 3304 3305 BundleList.emplace_back("funclet", CurrentFuncletPad); 3306 } 3307 3308 /// Emits a simple call (never an invoke) to the given runtime function. 3309 llvm::CallInst * 3310 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 3311 ArrayRef<llvm::Value*> args, 3312 const llvm::Twine &name) { 3313 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3314 getBundlesForFunclet(callee, CurrentFuncletPad, BundleList); 3315 3316 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList, name); 3317 call->setCallingConv(getRuntimeCC()); 3318 return call; 3319 } 3320 3321 /// Emits a call or invoke to the given noreturn runtime function. 3322 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, 3323 ArrayRef<llvm::Value*> args) { 3324 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3325 getBundlesForFunclet(callee, CurrentFuncletPad, BundleList); 3326 3327 if (getInvokeDest()) { 3328 llvm::InvokeInst *invoke = 3329 Builder.CreateInvoke(callee, 3330 getUnreachableBlock(), 3331 getInvokeDest(), 3332 args, 3333 BundleList); 3334 invoke->setDoesNotReturn(); 3335 invoke->setCallingConv(getRuntimeCC()); 3336 } else { 3337 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); 3338 call->setDoesNotReturn(); 3339 call->setCallingConv(getRuntimeCC()); 3340 Builder.CreateUnreachable(); 3341 } 3342 } 3343 3344 /// Emits a call or invoke instruction to the given nullary runtime function. 3345 llvm::CallSite 3346 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 3347 const Twine &name) { 3348 return EmitRuntimeCallOrInvoke(callee, None, name); 3349 } 3350 3351 /// Emits a call or invoke instruction to the given runtime function. 3352 llvm::CallSite 3353 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 3354 ArrayRef<llvm::Value*> args, 3355 const Twine &name) { 3356 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name); 3357 callSite.setCallingConv(getRuntimeCC()); 3358 return callSite; 3359 } 3360 3361 /// Emits a call or invoke instruction to the given function, depending 3362 /// on the current state of the EH stack. 
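/// (Illustrative: the same runtime helper emitted while an EH cleanup is
/// active becomes an 'invoke' that unwinds to the function's landing pad,
/// and a plain 'call' when getInvokeDest() returns null.)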
3363 llvm::CallSite 3364 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 3365 ArrayRef<llvm::Value *> Args, 3366 const Twine &Name) { 3367 llvm::BasicBlock *InvokeDest = getInvokeDest(); 3368 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3369 getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList); 3370 3371 llvm::Instruction *Inst; 3372 if (!InvokeDest) 3373 Inst = Builder.CreateCall(Callee, Args, BundleList, Name); 3374 else { 3375 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 3376 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList, 3377 Name); 3378 EmitBlock(ContBB); 3379 } 3380 3381 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3382 // optimizer it can aggressively ignore unwind edges. 3383 if (CGM.getLangOpts().ObjCAutoRefCount) 3384 AddObjCARCExceptionMetadata(Inst); 3385 3386 return llvm::CallSite(Inst); 3387 } 3388 3389 /// \brief Store a non-aggregate value to an address to initialize it. For 3390 /// initialization, a non-atomic store will be used. 3391 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, 3392 LValue Dst) { 3393 if (Src.isScalar()) 3394 CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true); 3395 else 3396 CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true); 3397 } 3398 3399 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, 3400 llvm::Value *New) { 3401 DeferredReplacements.push_back(std::make_pair(Old, New)); 3402 } 3403 3404 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 3405 llvm::Value *Callee, 3406 ReturnValueSlot ReturnValue, 3407 const CallArgList &CallArgs, 3408 CGCalleeInfo CalleeInfo, 3409 llvm::Instruction **callOrInvoke) { 3410 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 3411 3412 // Handle struct-return functions by passing a pointer to the 3413 // location that we would like to return into. 3414 QualType RetTy = CallInfo.getReturnType(); 3415 const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); 3416 3417 llvm::FunctionType *IRFuncTy = 3418 cast<llvm::FunctionType>( 3419 cast<llvm::PointerType>(Callee->getType())->getElementType()); 3420 3421 // If we're using inalloca, insert the allocation after the stack save. 3422 // FIXME: Do this earlier rather than hacking it in here! 3423 Address ArgMemory = Address::invalid(); 3424 const llvm::StructLayout *ArgMemoryLayout = nullptr; 3425 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { 3426 ArgMemoryLayout = CGM.getDataLayout().getStructLayout(ArgStruct); 3427 llvm::Instruction *IP = CallArgs.getStackBase(); 3428 llvm::AllocaInst *AI; 3429 if (IP) { 3430 IP = IP->getNextNode(); 3431 AI = new llvm::AllocaInst(ArgStruct, "argmem", IP); 3432 } else { 3433 AI = CreateTempAlloca(ArgStruct, "argmem"); 3434 } 3435 auto Align = CallInfo.getArgStructAlignment(); 3436 AI->setAlignment(Align.getQuantity()); 3437 AI->setUsedWithInAlloca(true); 3438 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); 3439 ArgMemory = Address(AI, Align); 3440 } 3441 3442 // Helper function to drill into the inalloca allocation. 
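  // (Illustrative, hypothetical layout: for an argument struct
  //  <{ i32, %struct.S }>, field 1 lives at the offset recorded in
  //  ArgMemoryLayout, and the lambda below emits the corresponding
  //  struct GEP off of %argmem.)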
3443   auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3444     auto FieldOffset =
3445       CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3446     return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3447   };
3448 
3449   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3450   SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3451 
3452   // If the call returns a temporary with struct return, create a temporary
3453   // alloca to hold the result, unless one is given to us.
3454   Address SRetPtr = Address::invalid();
3455   size_t UnusedReturnSize = 0;
3456   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3457     if (!ReturnValue.isNull()) {
3458       SRetPtr = ReturnValue.getValue();
3459     } else {
3460       SRetPtr = CreateMemTemp(RetTy);
3461       if (HaveInsertPoint() && ReturnValue.isUnused()) {
3462         uint64_t size =
3463             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3464         if (EmitLifetimeStart(size, SRetPtr.getPointer()))
3465           UnusedReturnSize = size;
3466       }
3467     }
3468     if (IRFunctionArgs.hasSRetArg()) {
3469       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3470     } else if (RetAI.isInAlloca()) {
3471       Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3472       Builder.CreateStore(SRetPtr.getPointer(), Addr);
3473     }
3474   }
3475 
3476   assert(CallInfo.arg_size() == CallArgs.size() &&
3477          "Mismatch between function signature & arguments.");
3478   unsigned ArgNo = 0;
3479   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3480   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3481        I != E; ++I, ++info_it, ++ArgNo) {
3482     const ABIArgInfo &ArgInfo = info_it->info;
3483     RValue RV = I->RV;
3484 
3485     // Insert a padding argument to ensure proper alignment.
3486     if (IRFunctionArgs.hasPaddingArg(ArgNo))
3487       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3488           llvm::UndefValue::get(ArgInfo.getPaddingType());
3489 
3490     unsigned FirstIRArg, NumIRArgs;
3491     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3492 
3493     switch (ArgInfo.getKind()) {
3494     case ABIArgInfo::InAlloca: {
3495       assert(NumIRArgs == 0);
3496       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3497       if (RV.isAggregate()) {
3498         // Replace the placeholder with the appropriate argument slot GEP.
3499         llvm::Instruction *Placeholder =
3500             cast<llvm::Instruction>(RV.getAggregatePointer());
3501         CGBuilderTy::InsertPoint IP = Builder.saveIP();
3502         Builder.SetInsertPoint(Placeholder);
3503         Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3504         Builder.restoreIP(IP);
3505         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3506       } else {
3507         // Store the RValue into the argument struct.
3508         Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3509         unsigned AS = Addr.getType()->getPointerAddressSpace();
3510         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3511         // There are some cases where a trivial bitcast is unavoidable. The
3512         // definition of a type later in a translation unit may change its type
3513         // from {}* to (%struct.foo*)*.
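        // (Illustrative: if the field was created as {}* before %struct.foo
        //  was complete, Addr is bitcast just below to %struct.foo** so the
        //  store through the l-value type-checks.)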
3514 if (Addr.getType() != MemType) 3515 Addr = Builder.CreateBitCast(Addr, MemType); 3516 LValue argLV = MakeAddrLValue(Addr, I->Ty); 3517 EmitInitStoreOfNonAggregate(*this, RV, argLV); 3518 } 3519 break; 3520 } 3521 3522 case ABIArgInfo::Indirect: { 3523 assert(NumIRArgs == 1); 3524 if (RV.isScalar() || RV.isComplex()) { 3525 // Make a temporary alloca to pass the argument. 3526 Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign()); 3527 IRCallArgs[FirstIRArg] = Addr.getPointer(); 3528 3529 LValue argLV = MakeAddrLValue(Addr, I->Ty); 3530 EmitInitStoreOfNonAggregate(*this, RV, argLV); 3531 } else { 3532 // We want to avoid creating an unnecessary temporary+copy here; 3533 // however, we need one in three cases: 3534 // 1. If the argument is not byval, and we are required to copy the 3535 // source. (This case doesn't occur on any common architecture.) 3536 // 2. If the argument is byval, RV is not sufficiently aligned, and 3537 // we cannot force it to be sufficiently aligned. 3538 // 3. If the argument is byval, but RV is located in an address space 3539 // different than that of the argument (0). 3540 Address Addr = RV.getAggregateAddress(); 3541 CharUnits Align = ArgInfo.getIndirectAlign(); 3542 const llvm::DataLayout *TD = &CGM.getDataLayout(); 3543 const unsigned RVAddrSpace = Addr.getType()->getAddressSpace(); 3544 const unsigned ArgAddrSpace = 3545 (FirstIRArg < IRFuncTy->getNumParams() 3546 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() 3547 : 0); 3548 if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) || 3549 (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align && 3550 llvm::getOrEnforceKnownAlignment(Addr.getPointer(), 3551 Align.getQuantity(), *TD) 3552 < Align.getQuantity()) || 3553 (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) { 3554 // Create an aligned temporary, and copy to it. 3555 Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign()); 3556 IRCallArgs[FirstIRArg] = AI.getPointer(); 3557 EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified()); 3558 } else { 3559 // Skip the extra memcpy call. 3560 IRCallArgs[FirstIRArg] = Addr.getPointer(); 3561 } 3562 } 3563 break; 3564 } 3565 3566 case ABIArgInfo::Ignore: 3567 assert(NumIRArgs == 0); 3568 break; 3569 3570 case ABIArgInfo::Extend: 3571 case ABIArgInfo::Direct: { 3572 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) && 3573 ArgInfo.getCoerceToType() == ConvertType(info_it->type) && 3574 ArgInfo.getDirectOffset() == 0) { 3575 assert(NumIRArgs == 1); 3576 llvm::Value *V; 3577 if (RV.isScalar()) 3578 V = RV.getScalarVal(); 3579 else 3580 V = Builder.CreateLoad(RV.getAggregateAddress()); 3581 3582 // We might have to widen integers, but we should never truncate. 3583 if (ArgInfo.getCoerceToType() != V->getType() && 3584 V->getType()->isIntegerTy()) 3585 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType()); 3586 3587 // If the argument doesn't match, perform a bitcast to coerce it. This 3588 // can happen due to trivial type mismatches. 3589 if (FirstIRArg < IRFuncTy->getNumParams() && 3590 V->getType() != IRFuncTy->getParamType(FirstIRArg)) 3591 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); 3592 IRCallArgs[FirstIRArg] = V; 3593 break; 3594 } 3595 3596 // FIXME: Avoid the conversion through memory if possible. 
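      // (Hedged sketch with hypothetical types: a 'struct { int x, y; }'
      //  argument whose coerce-to type is i64 is spilled to a temporary
      //  here and reloaded as one i64; if the coerce-to type is instead a
      //  flattenable struct such as { i32, i32 }, it is loaded element by
      //  element in the STy path below.)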
3597 Address Src = Address::invalid(); 3598 if (RV.isScalar() || RV.isComplex()) { 3599 Src = CreateMemTemp(I->Ty, "coerce"); 3600 LValue SrcLV = MakeAddrLValue(Src, I->Ty); 3601 EmitInitStoreOfNonAggregate(*this, RV, SrcLV); 3602 } else { 3603 Src = RV.getAggregateAddress(); 3604 } 3605 3606 // If the value is offset in memory, apply the offset now. 3607 Src = emitAddressAtOffset(*this, Src, ArgInfo); 3608 3609 // Fast-isel and the optimizer generally like scalar values better than 3610 // FCAs, so we flatten them if this is safe to do for this argument. 3611 llvm::StructType *STy = 3612 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType()); 3613 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 3614 llvm::Type *SrcTy = Src.getType()->getElementType(); 3615 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); 3616 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); 3617 3618 // If the source type is smaller than the destination type of the 3619 // coerce-to logic, copy the source value into a temp alloca the size 3620 // of the destination type to allow loading all of it. The bits past 3621 // the source value are left undef. 3622 if (SrcSize < DstSize) { 3623 Address TempAlloca 3624 = CreateTempAlloca(STy, Src.getAlignment(), 3625 Src.getName() + ".coerce"); 3626 Builder.CreateMemCpy(TempAlloca, Src, SrcSize); 3627 Src = TempAlloca; 3628 } else { 3629 Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy)); 3630 } 3631 3632 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy); 3633 assert(NumIRArgs == STy->getNumElements()); 3634 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 3635 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i)); 3636 Address EltPtr = Builder.CreateStructGEP(Src, i, Offset); 3637 llvm::Value *LI = Builder.CreateLoad(EltPtr); 3638 IRCallArgs[FirstIRArg + i] = LI; 3639 } 3640 } else { 3641 // In the simple case, just pass the coerced loaded value. 3642 assert(NumIRArgs == 1); 3643 IRCallArgs[FirstIRArg] = 3644 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this); 3645 } 3646 3647 break; 3648 } 3649 3650 case ABIArgInfo::CoerceAndExpand: { 3651 assert(RV.isAggregate() && 3652 "CoerceAndExpand does not support non-aggregate types yet"); 3653 3654 auto coercionType = ArgInfo.getCoerceAndExpandType(); 3655 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 3656 3657 Address addr = RV.getAggregateAddress(); 3658 addr = Builder.CreateElementBitCast(addr, coercionType); 3659 3660 unsigned IRArgPos = FirstIRArg; 3661 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 3662 llvm::Type *eltType = coercionType->getElementType(i); 3663 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; 3664 Address eltAddr = Builder.CreateStructGEP(addr, i, layout); 3665 llvm::Value *elt = Builder.CreateLoad(eltAddr); 3666 IRCallArgs[IRArgPos++] = elt; 3667 } 3668 assert(IRArgPos == FirstIRArg + NumIRArgs); 3669 3670 break; 3671 } 3672 3673 case ABIArgInfo::Expand: 3674 unsigned IRArgPos = FirstIRArg; 3675 ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos); 3676 assert(IRArgPos == FirstIRArg + NumIRArgs); 3677 break; 3678 } 3679 } 3680 3681 if (ArgMemory.isValid()) { 3682 llvm::Value *Arg = ArgMemory.getPointer(); 3683 if (CallInfo.isVariadic()) { 3684 // When passing non-POD arguments by value to variadic functions, we will 3685 // end up with a variadic prototype and an inalloca call site. 
In such
3686       // cases, we can't do any parameter mismatch checks. Give up and bitcast
3687       // the callee.
3688       unsigned CalleeAS =
3689           cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
3690       Callee = Builder.CreateBitCast(
3691           Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
3692     } else {
3693       llvm::Type *LastParamTy =
3694           IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
3695       if (Arg->getType() != LastParamTy) {
3696 #ifndef NDEBUG
3697         // Assert that these structs have equivalent element types.
3698         llvm::StructType *FullTy = CallInfo.getArgStruct();
3699         llvm::StructType *DeclaredTy = cast<llvm::StructType>(
3700             cast<llvm::PointerType>(LastParamTy)->getElementType());
3701         assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
3702         for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
3703                                                  DE = DeclaredTy->element_end(),
3704                                                  FI = FullTy->element_begin();
3705              DI != DE; ++DI, ++FI)
3706           assert(*DI == *FI);
3707 #endif
3708         Arg = Builder.CreateBitCast(Arg, LastParamTy);
3709       }
3710     }
3711     assert(IRFunctionArgs.hasInallocaArg());
3712     IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
3713   }
3714 
3715   if (!CallArgs.getCleanupsToDeactivate().empty())
3716     deactivateArgCleanupsBeforeCall(*this, CallArgs);
3717 
3718   // If the callee is a bitcast of a function to a varargs pointer to function
3719   // type, check to see if we can remove the bitcast. This handles some cases
3720   // with unprototyped functions.
3721   if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
3722     if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
3723       llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
3724       llvm::FunctionType *CurFT =
3725           cast<llvm::FunctionType>(CurPT->getElementType());
3726       llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
3727 
3728       if (CE->getOpcode() == llvm::Instruction::BitCast &&
3729           ActualFT->getReturnType() == CurFT->getReturnType() &&
3730           ActualFT->getNumParams() == CurFT->getNumParams() &&
3731           ActualFT->getNumParams() == IRCallArgs.size() &&
3732           (CurFT->isVarArg() || !ActualFT->isVarArg())) {
3733         bool ArgsMatch = true;
3734         for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
3735           if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
3736             ArgsMatch = false;
3737             break;
3738           }
3739 
3740         // Strip the cast if we can get away with it. This is a nice cleanup,
3741         // but also allows us to inline the function at -O0 if it is marked
3742         // always_inline.
3743         if (ArgsMatch)
3744           Callee = CalleeF;
3745       }
3746     }
3747 
3748   assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
3749   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
3750     // An inalloca argument can have a different type.
3751     if (IRFunctionArgs.hasInallocaArg() &&
3752         i == IRFunctionArgs.getInallocaArgNo())
3753       continue;
3754     if (i < IRFuncTy->getNumParams())
3755       assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
3756   }
3757 
3758   unsigned CallingConv;
3759   CodeGen::AttributeListType AttributeList;
3760   CGM.ConstructAttributeList(Callee->getName(), CallInfo, CalleeInfo,
3761                              AttributeList, CallingConv,
3762                              /*AttrOnCallSite=*/true);
3763   llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
3764                                                      AttributeList);
3765 
3766   bool CannotThrow;
3767   if (currentFunctionUsesSEHTry()) {
3768     // SEH cares about asynchronous exceptions, so everything can "throw."
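    // (Even a nounwind callee can fault, and under SEH a hardware fault is
    //  delivered as an asynchronous exception, so the unwind edge must be
    //  kept conservatively.)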
3769     CannotThrow = false;
3770   } else if (isCleanupPadScope() &&
3771              EHPersonality::get(*this).isMSVCXXPersonality()) {
3772     // The MSVC++ personality will implicitly terminate the program if an
3773     // exception is thrown. An unwind edge cannot be reached.
3774     CannotThrow = true;
3775   } else {
3776     // Otherwise, nounwind call sites will never throw.
3777     CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
3778                                      llvm::Attribute::NoUnwind);
3779   }
3780   llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
3781 
3782   SmallVector<llvm::OperandBundleDef, 1> BundleList;
3783   getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
3784 
3785   llvm::CallSite CS;
3786   if (!InvokeDest) {
3787     CS = Builder.CreateCall(Callee, IRCallArgs, BundleList);
3788   } else {
3789     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
3790     CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs,
3791                               BundleList);
3792     EmitBlock(Cont);
3793   }
3794   if (callOrInvoke)
3795     *callOrInvoke = CS.getInstruction();
3796 
3797   if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
3798       !CS.hasFnAttr(llvm::Attribute::NoInline))
3799     Attrs =
3800         Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
3801                            llvm::Attribute::AlwaysInline);
3802 
3803   // Disable inlining inside SEH __try blocks.
3804   if (isSEHTryScope())
3805     Attrs =
3806         Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
3807                            llvm::Attribute::NoInline);
3808 
3809   CS.setAttributes(Attrs);
3810   CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
3811 
3812   // Insert instrumentation or attach profile metadata at indirect call sites.
3813   if (!CS.getCalledFunction())
3814     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
3815                      CS.getInstruction(), Callee);
3816 
3817   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3818   // optimizer it can aggressively ignore unwind edges.
3819   if (CGM.getLangOpts().ObjCAutoRefCount)
3820     AddObjCARCExceptionMetadata(CS.getInstruction());
3821 
3822   // If the call doesn't return, finish the basic block and clear the
3823   // insertion point; this allows the rest of IRgen to discard
3824   // unreachable code.
3825   if (CS.doesNotReturn()) {
3826     if (UnusedReturnSize)
3827       EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
3828                       SRetPtr.getPointer());
3829 
3830     Builder.CreateUnreachable();
3831     Builder.ClearInsertionPoint();
3832 
3833     // FIXME: For now, emit a dummy basic block because expr emitters in
3834     // general are not ready to handle emitting expressions at unreachable
3835     // points.
3836     EnsureInsertPoint();
3837 
3838     // Return a reasonable RValue.
3839     return GetUndefRValue(RetTy);
3840   }
3841 
3842   llvm::Instruction *CI = CS.getInstruction();
3843   if (!CI->getType()->isVoidTy())
3844     CI->setName("call");
3845 
3846   // Emit any writebacks immediately. Arguably this should happen
3847   // after any return-value munging.
3848   if (CallArgs.hasWritebacks())
3849     emitWritebacks(*this, CallArgs);
3850 
3851   // The stack cleanup for inalloca arguments has to run out of the normal
3852   // lexical order, so deactivate it and run it manually here.
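  // (freeArgumentMemory emits the llvm.stackrestore paired with the
  //  llvm.stacksave from allocateArgumentMemory, so the restore lands here,
  //  after the call, rather than at the cleanup's lexical position.)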
3853 CallArgs.freeArgumentMemory(*this); 3854 3855 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) { 3856 const Decl *TargetDecl = CalleeInfo.getCalleeDecl(); 3857 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>()) 3858 Call->setTailCallKind(llvm::CallInst::TCK_NoTail); 3859 } 3860 3861 RValue Ret = [&] { 3862 switch (RetAI.getKind()) { 3863 case ABIArgInfo::InAlloca: 3864 case ABIArgInfo::Indirect: { 3865 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation()); 3866 if (UnusedReturnSize) 3867 EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize), 3868 SRetPtr.getPointer()); 3869 return ret; 3870 } 3871 3872 case ABIArgInfo::CoerceAndExpand: { 3873 auto coercionType = RetAI.getCoerceAndExpandType(); 3874 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 3875 3876 Address addr = SRetPtr; 3877 addr = Builder.CreateElementBitCast(addr, coercionType); 3878 3879 unsigned unpaddedIndex = 0; 3880 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 3881 llvm::Type *eltType = coercionType->getElementType(i); 3882 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; 3883 Address eltAddr = Builder.CreateStructGEP(addr, i, layout); 3884 llvm::Value *elt = Builder.CreateExtractValue(CI, unpaddedIndex++); 3885 Builder.CreateStore(elt, eltAddr); 3886 } 3887 break; 3888 } 3889 3890 case ABIArgInfo::Ignore: 3891 // If we are ignoring an argument that had a result, make sure to 3892 // construct the appropriate return value for our caller. 3893 return GetUndefRValue(RetTy); 3894 3895 case ABIArgInfo::Extend: 3896 case ABIArgInfo::Direct: { 3897 llvm::Type *RetIRTy = ConvertType(RetTy); 3898 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) { 3899 switch (getEvaluationKind(RetTy)) { 3900 case TEK_Complex: { 3901 llvm::Value *Real = Builder.CreateExtractValue(CI, 0); 3902 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1); 3903 return RValue::getComplex(std::make_pair(Real, Imag)); 3904 } 3905 case TEK_Aggregate: { 3906 Address DestPtr = ReturnValue.getValue(); 3907 bool DestIsVolatile = ReturnValue.isVolatile(); 3908 3909 if (!DestPtr.isValid()) { 3910 DestPtr = CreateMemTemp(RetTy, "agg.tmp"); 3911 DestIsVolatile = false; 3912 } 3913 BuildAggStore(*this, CI, DestPtr, DestIsVolatile); 3914 return RValue::getAggregate(DestPtr); 3915 } 3916 case TEK_Scalar: { 3917 // If the argument doesn't match, perform a bitcast to coerce it. This 3918 // can happen due to trivial type mismatches. 3919 llvm::Value *V = CI; 3920 if (V->getType() != RetIRTy) 3921 V = Builder.CreateBitCast(V, RetIRTy); 3922 return RValue::get(V); 3923 } 3924 } 3925 llvm_unreachable("bad evaluation kind"); 3926 } 3927 3928 Address DestPtr = ReturnValue.getValue(); 3929 bool DestIsVolatile = ReturnValue.isVolatile(); 3930 3931 if (!DestPtr.isValid()) { 3932 DestPtr = CreateMemTemp(RetTy, "coerce"); 3933 DestIsVolatile = false; 3934 } 3935 3936 // If the value is offset in memory, apply the offset now. 
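      // (Illustrative: a nonzero getDirectOffset() means the coerced value
      //  lives that many bytes into the destination object, so the store
      //  goes through the adjusted pointer computed below.)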
3937       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
3938       CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
3939 
3940       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
3941     }
3942 
3943     case ABIArgInfo::Expand:
3944       llvm_unreachable("Invalid ABI kind for return argument");
3945     }
3946 
3947     llvm_unreachable("Unhandled ABIArgInfo::Kind");
3948   }();
3949 
3950   const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
3951 
3952   if (Ret.isScalar() && TargetDecl) {
3953     if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
3954       llvm::Value *OffsetValue = nullptr;
3955       if (const auto *Offset = AA->getOffset())
3956         OffsetValue = EmitScalarExpr(Offset);
3957 
3958       llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
3959       llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
3960       EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
3961                               OffsetValue);
3962     }
3963   }
3964 
3965   return Ret;
3966 }
3967 
3968 /* VarArg handling */
3969 
3970 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
3971   VAListAddr = VE->isMicrosoftABI()
3972                    ? EmitMSVAListRef(VE->getSubExpr())
3973                    : EmitVAListRef(VE->getSubExpr());
3974   QualType Ty = VE->getType();
3975   if (VE->isMicrosoftABI())
3976     return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
3977   return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
3978 }
3979 
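// (Usage sketch, illustrative only: a C 'va_arg(ap, int)' becomes a
//  VAArgExpr and reaches EmitVAArg above; the target's ABIInfo then decides
//  how to step the va_list, while the Microsoft variant is routed through
//  EmitMSVAArg, since MS va_lists are simple character pointers.)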