1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // These classes wrap the information about a call or function 11 // definition used to handle ABI compliancy. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "CGCall.h" 16 #include "ABIInfo.h" 17 #include "CGCXXABI.h" 18 #include "CodeGenFunction.h" 19 #include "CodeGenModule.h" 20 #include "TargetInfo.h" 21 #include "clang/AST/Decl.h" 22 #include "clang/AST/DeclCXX.h" 23 #include "clang/AST/DeclObjC.h" 24 #include "clang/Basic/TargetInfo.h" 25 #include "clang/CodeGen/CGFunctionInfo.h" 26 #include "clang/Frontend/CodeGenOptions.h" 27 #include "llvm/ADT/StringExtras.h" 28 #include "llvm/IR/Attributes.h" 29 #include "llvm/IR/CallSite.h" 30 #include "llvm/IR/DataLayout.h" 31 #include "llvm/IR/InlineAsm.h" 32 #include "llvm/IR/Intrinsics.h" 33 #include "llvm/IR/IntrinsicInst.h" 34 #include "llvm/Transforms/Utils/Local.h" 35 #include <sstream> 36 using namespace clang; 37 using namespace CodeGen; 38 39 /***/ 40 41 static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) { 42 switch (CC) { 43 default: return llvm::CallingConv::C; 44 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall; 45 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall; 46 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall; 47 case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64; 48 case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV; 49 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS; 50 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 51 case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI; 52 // TODO: Add support for __pascal to LLVM. 53 case CC_X86Pascal: return llvm::CallingConv::C; 54 // TODO: Add support for __vectorcall to LLVM. 55 case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall; 56 case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC; 57 case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL; 58 } 59 } 60 61 /// Derives the 'this' type for codegen purposes, i.e. ignoring method 62 /// qualification. 63 /// FIXME: address space qualification? 64 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) { 65 QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal(); 66 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy)); 67 } 68 69 /// Returns the canonical formal type of the given C++ method. 70 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) { 71 return MD->getType()->getCanonicalTypeUnqualified() 72 .getAs<FunctionProtoType>(); 73 } 74 75 /// Returns the "extra-canonicalized" return type, which discards 76 /// qualifiers on the return type. Codegen doesn't care about them, 77 /// and it makes ABI code a little easier to be able to assume that 78 /// all parameter and return types are top-level unqualified. 79 static CanQualType GetReturnType(QualType RetTy) { 80 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); 81 } 82 83 /// Arrange the argument and result information for a value of the given 84 /// unprototyped freestanding function type. 
85 const CGFunctionInfo & 86 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) { 87 // When translating an unprototyped function type, always use a 88 // variadic type. 89 return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(), 90 /*instanceMethod=*/false, 91 /*chainCall=*/false, None, 92 FTNP->getExtInfo(), RequiredArgs(0)); 93 } 94 95 /// Arrange the LLVM function layout for a value of the given function 96 /// type, on top of any implicit parameters already stored. 97 static const CGFunctionInfo & 98 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, 99 SmallVectorImpl<CanQualType> &prefix, 100 CanQual<FunctionProtoType> FTP) { 101 RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size()); 102 // FIXME: Kill copy. 103 prefix.append(FTP->param_type_begin(), FTP->param_type_end()); 104 CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); 105 return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod, 106 /*chainCall=*/false, prefix, 107 FTP->getExtInfo(), required); 108 } 109 110 /// Arrange the argument and result information for a value of the 111 /// given freestanding function type. 112 const CGFunctionInfo & 113 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) { 114 SmallVector<CanQualType, 16> argTypes; 115 return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes, 116 FTP); 117 } 118 119 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) { 120 // Set the appropriate calling convention for the Function. 121 if (D->hasAttr<StdCallAttr>()) 122 return CC_X86StdCall; 123 124 if (D->hasAttr<FastCallAttr>()) 125 return CC_X86FastCall; 126 127 if (D->hasAttr<ThisCallAttr>()) 128 return CC_X86ThisCall; 129 130 if (D->hasAttr<VectorCallAttr>()) 131 return CC_X86VectorCall; 132 133 if (D->hasAttr<PascalAttr>()) 134 return CC_X86Pascal; 135 136 if (PcsAttr *PCS = D->getAttr<PcsAttr>()) 137 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP); 138 139 if (D->hasAttr<IntelOclBiccAttr>()) 140 return CC_IntelOclBicc; 141 142 if (D->hasAttr<MSABIAttr>()) 143 return IsWindows ? CC_C : CC_X86_64Win64; 144 145 if (D->hasAttr<SysVABIAttr>()) 146 return IsWindows ? CC_X86_64SysV : CC_C; 147 148 return CC_C; 149 } 150 151 /// Arrange the argument and result information for a call to an 152 /// unknown C++ non-static member function of the given abstract type. 153 /// (Zero value of RD means we don't have any meaningful "this" argument type, 154 /// so fall back to a generic pointer type). 155 /// The member function must be an ordinary function, i.e. not a 156 /// constructor or destructor. 157 const CGFunctionInfo & 158 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, 159 const FunctionProtoType *FTP) { 160 SmallVector<CanQualType, 16> argTypes; 161 162 // Add the 'this' pointer. 163 if (RD) 164 argTypes.push_back(GetThisType(Context, RD)); 165 else 166 argTypes.push_back(Context.VoidPtrTy); 167 168 return ::arrangeLLVMFunctionInfo( 169 *this, true, argTypes, 170 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>()); 171 } 172 173 /// Arrange the argument and result information for a declaration or 174 /// definition of the given C++ non-static member function. The 175 /// member function must be an ordinary function, i.e. not a 176 /// constructor or destructor. 
177 const CGFunctionInfo & 178 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) { 179 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!"); 180 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!"); 181 182 CanQual<FunctionProtoType> prototype = GetFormalType(MD); 183 184 if (MD->isInstance()) { 185 // The abstract case is perfectly fine. 186 const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD); 187 return arrangeCXXMethodType(ThisType, prototype.getTypePtr()); 188 } 189 190 return arrangeFreeFunctionType(prototype); 191 } 192 193 const CGFunctionInfo & 194 CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, 195 StructorType Type) { 196 197 SmallVector<CanQualType, 16> argTypes; 198 argTypes.push_back(GetThisType(Context, MD->getParent())); 199 200 GlobalDecl GD; 201 if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) { 202 GD = GlobalDecl(CD, toCXXCtorType(Type)); 203 } else { 204 auto *DD = dyn_cast<CXXDestructorDecl>(MD); 205 GD = GlobalDecl(DD, toCXXDtorType(Type)); 206 } 207 208 CanQual<FunctionProtoType> FTP = GetFormalType(MD); 209 210 // Add the formal parameters. 211 argTypes.append(FTP->param_type_begin(), FTP->param_type_end()); 212 213 TheCXXABI.buildStructorSignature(MD, Type, argTypes); 214 215 RequiredArgs required = 216 (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All); 217 218 FunctionType::ExtInfo extInfo = FTP->getExtInfo(); 219 CanQualType resultType = TheCXXABI.HasThisReturn(GD) 220 ? argTypes.front() 221 : TheCXXABI.hasMostDerivedReturn(GD) 222 ? CGM.getContext().VoidPtrTy 223 : Context.VoidTy; 224 return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true, 225 /*chainCall=*/false, argTypes, extInfo, 226 required); 227 } 228 229 /// Arrange a call to a C++ method, passing the given arguments. 230 const CGFunctionInfo & 231 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args, 232 const CXXConstructorDecl *D, 233 CXXCtorType CtorKind, 234 unsigned ExtraArgs) { 235 // FIXME: Kill copy. 236 SmallVector<CanQualType, 16> ArgTypes; 237 for (const auto &Arg : args) 238 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); 239 240 CanQual<FunctionProtoType> FPT = GetFormalType(D); 241 RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs); 242 GlobalDecl GD(D, CtorKind); 243 CanQualType ResultType = TheCXXABI.HasThisReturn(GD) 244 ? ArgTypes.front() 245 : TheCXXABI.hasMostDerivedReturn(GD) 246 ? CGM.getContext().VoidPtrTy 247 : Context.VoidTy; 248 249 FunctionType::ExtInfo Info = FPT->getExtInfo(); 250 return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true, 251 /*chainCall=*/false, ArgTypes, Info, 252 Required); 253 } 254 255 /// Arrange the argument and result information for the declaration or 256 /// definition of the given function. 257 const CGFunctionInfo & 258 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { 259 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) 260 if (MD->isInstance()) 261 return arrangeCXXMethodDeclaration(MD); 262 263 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified(); 264 265 assert(isa<FunctionType>(FTy)); 266 267 // When declaring a function without a prototype, always use a 268 // non-variadic type. 
269 if (isa<FunctionNoProtoType>(FTy)) { 270 CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>(); 271 return arrangeLLVMFunctionInfo( 272 noProto->getReturnType(), /*instanceMethod=*/false, 273 /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All); 274 } 275 276 assert(isa<FunctionProtoType>(FTy)); 277 return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>()); 278 } 279 280 /// Arrange the argument and result information for the declaration or 281 /// definition of an Objective-C method. 282 const CGFunctionInfo & 283 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) { 284 // It happens that this is the same as a call with no optional 285 // arguments, except also using the formal 'self' type. 286 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType()); 287 } 288 289 /// Arrange the argument and result information for the function type 290 /// through which to perform a send to the given Objective-C method, 291 /// using the given receiver type. The receiver type is not always 292 /// the 'self' type of the method or even an Objective-C pointer type. 293 /// This is *not* the right method for actually performing such a 294 /// message send, due to the possibility of optional arguments. 295 const CGFunctionInfo & 296 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, 297 QualType receiverType) { 298 SmallVector<CanQualType, 16> argTys; 299 argTys.push_back(Context.getCanonicalParamType(receiverType)); 300 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType())); 301 // FIXME: Kill copy? 302 for (const auto *I : MD->params()) { 303 argTys.push_back(Context.getCanonicalParamType(I->getType())); 304 } 305 306 FunctionType::ExtInfo einfo; 307 bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows(); 308 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows)); 309 310 if (getContext().getLangOpts().ObjCAutoRefCount && 311 MD->hasAttr<NSReturnsRetainedAttr>()) 312 einfo = einfo.withProducesResult(true); 313 314 RequiredArgs required = 315 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All); 316 317 return arrangeLLVMFunctionInfo( 318 GetReturnType(MD->getReturnType()), /*instanceMethod=*/false, 319 /*chainCall=*/false, argTys, einfo, required); 320 } 321 322 const CGFunctionInfo & 323 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { 324 // FIXME: Do we need to handle ObjCMethodDecl? 325 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); 326 327 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) 328 return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType())); 329 330 if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) 331 return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType())); 332 333 return arrangeFunctionDeclaration(FD); 334 } 335 336 /// Arrange a thunk that takes 'this' as the first parameter followed by 337 /// varargs. Return a void pointer, regardless of the actual return type. 338 /// The body of the thunk will end in a musttail call to a function of the 339 /// correct type, and the caller will bitcast the function to the correct 340 /// prototype. 
341 const CGFunctionInfo & 342 CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) { 343 assert(MD->isVirtual() && "only virtual memptrs have thunks"); 344 CanQual<FunctionProtoType> FTP = GetFormalType(MD); 345 CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) }; 346 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false, 347 /*chainCall=*/false, ArgTys, 348 FTP->getExtInfo(), RequiredArgs(1)); 349 } 350 351 const CGFunctionInfo & 352 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD, 353 CXXCtorType CT) { 354 assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure); 355 356 CanQual<FunctionProtoType> FTP = GetFormalType(CD); 357 SmallVector<CanQualType, 2> ArgTys; 358 const CXXRecordDecl *RD = CD->getParent(); 359 ArgTys.push_back(GetThisType(Context, RD)); 360 if (CT == Ctor_CopyingClosure) 361 ArgTys.push_back(*FTP->param_type_begin()); 362 if (RD->getNumVBases() > 0) 363 ArgTys.push_back(Context.IntTy); 364 CallingConv CC = Context.getDefaultCallingConvention( 365 /*IsVariadic=*/false, /*IsCXXMethod=*/true); 366 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true, 367 /*chainCall=*/false, ArgTys, 368 FunctionType::ExtInfo(CC), RequiredArgs::All); 369 } 370 371 /// Arrange a call as unto a free function, except possibly with an 372 /// additional number of formal parameters considered required. 373 static const CGFunctionInfo & 374 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, 375 CodeGenModule &CGM, 376 const CallArgList &args, 377 const FunctionType *fnType, 378 unsigned numExtraRequiredArgs, 379 bool chainCall) { 380 assert(args.size() >= numExtraRequiredArgs); 381 382 // In most cases, there are no optional arguments. 383 RequiredArgs required = RequiredArgs::All; 384 385 // If we have a variadic prototype, the required arguments are the 386 // extra prefix plus the arguments in the prototype. 387 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) { 388 if (proto->isVariadic()) 389 required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs); 390 391 // If we don't have a prototype at all, but we're supposed to 392 // explicitly use the variadic convention for unprototyped calls, 393 // treat all of the arguments as required but preserve the nominal 394 // possibility of variadics. 395 } else if (CGM.getTargetCodeGenInfo() 396 .isNoProtoCallVariadic(args, 397 cast<FunctionNoProtoType>(fnType))) { 398 required = RequiredArgs(args.size()); 399 } 400 401 // FIXME: Kill copy. 402 SmallVector<CanQualType, 16> argTypes; 403 for (const auto &arg : args) 404 argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty)); 405 return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()), 406 /*instanceMethod=*/false, chainCall, 407 argTypes, fnType->getExtInfo(), required); 408 } 409 410 /// Figure out the rules for calling a function with the given formal 411 /// type using the given arguments. The arguments are necessary 412 /// because the function might be unprototyped, in which case it's 413 /// target-dependent in crazy ways. 414 const CGFunctionInfo & 415 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args, 416 const FunctionType *fnType, 417 bool chainCall) { 418 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 419 chainCall ? 1 : 0, chainCall); 420 } 421 422 /// A block function call is essentially a free-function call with an 423 /// extra implicit argument. 
424 const CGFunctionInfo & 425 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args, 426 const FunctionType *fnType) { 427 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1, 428 /*chainCall=*/false); 429 } 430 431 const CGFunctionInfo & 432 CodeGenTypes::arrangeFreeFunctionCall(QualType resultType, 433 const CallArgList &args, 434 FunctionType::ExtInfo info, 435 RequiredArgs required) { 436 // FIXME: Kill copy. 437 SmallVector<CanQualType, 16> argTypes; 438 for (const auto &Arg : args) 439 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); 440 return arrangeLLVMFunctionInfo( 441 GetReturnType(resultType), /*instanceMethod=*/false, 442 /*chainCall=*/false, argTypes, info, required); 443 } 444 445 /// Arrange a call to a C++ method, passing the given arguments. 446 const CGFunctionInfo & 447 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args, 448 const FunctionProtoType *FPT, 449 RequiredArgs required) { 450 // FIXME: Kill copy. 451 SmallVector<CanQualType, 16> argTypes; 452 for (const auto &Arg : args) 453 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); 454 455 FunctionType::ExtInfo info = FPT->getExtInfo(); 456 return arrangeLLVMFunctionInfo( 457 GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true, 458 /*chainCall=*/false, argTypes, info, required); 459 } 460 461 const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration( 462 QualType resultType, const FunctionArgList &args, 463 const FunctionType::ExtInfo &info, bool isVariadic) { 464 // FIXME: Kill copy. 465 SmallVector<CanQualType, 16> argTypes; 466 for (auto Arg : args) 467 argTypes.push_back(Context.getCanonicalParamType(Arg->getType())); 468 469 RequiredArgs required = 470 (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All); 471 return arrangeLLVMFunctionInfo( 472 GetReturnType(resultType), /*instanceMethod=*/false, 473 /*chainCall=*/false, argTypes, info, required); 474 } 475 476 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() { 477 return arrangeLLVMFunctionInfo( 478 getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false, 479 None, FunctionType::ExtInfo(), RequiredArgs::All); 480 } 481 482 /// Arrange the argument and result information for an abstract value 483 /// of a given function type. This is the method which all of the 484 /// above functions ultimately defer to. 485 const CGFunctionInfo & 486 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType, 487 bool instanceMethod, 488 bool chainCall, 489 ArrayRef<CanQualType> argTypes, 490 FunctionType::ExtInfo info, 491 RequiredArgs required) { 492 assert(std::all_of(argTypes.begin(), argTypes.end(), 493 std::mem_fun_ref(&CanQualType::isCanonicalAsParam))); 494 495 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC()); 496 497 // Lookup or create unique function info. 498 llvm::FoldingSetNodeID ID; 499 CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required, 500 resultType, argTypes); 501 502 void *insertPos = nullptr; 503 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos); 504 if (FI) 505 return *FI; 506 507 // Construct the function info. We co-allocate the ArgInfos. 508 FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info, 509 resultType, argTypes, required); 510 FunctionInfos.InsertNode(FI, insertPos); 511 512 bool inserted = FunctionsBeingProcessed.insert(FI).second; 513 (void)inserted; 514 assert(inserted && "Recursively being processed?"); 515 516 // Compute ABI information. 
517 getABIInfo().computeInfo(*FI); 518 519 // Loop over all of the computed argument and return value info. If any of 520 // them are direct or extend without a specified coerce type, specify the 521 // default now. 522 ABIArgInfo &retInfo = FI->getReturnInfo(); 523 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) 524 retInfo.setCoerceToType(ConvertType(FI->getReturnType())); 525 526 for (auto &I : FI->arguments()) 527 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr) 528 I.info.setCoerceToType(ConvertType(I.type)); 529 530 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased; 531 assert(erased && "Not in set?"); 532 533 return *FI; 534 } 535 536 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, 537 bool instanceMethod, 538 bool chainCall, 539 const FunctionType::ExtInfo &info, 540 CanQualType resultType, 541 ArrayRef<CanQualType> argTypes, 542 RequiredArgs required) { 543 void *buffer = operator new(sizeof(CGFunctionInfo) + 544 sizeof(ArgInfo) * (argTypes.size() + 1)); 545 CGFunctionInfo *FI = new(buffer) CGFunctionInfo(); 546 FI->CallingConvention = llvmCC; 547 FI->EffectiveCallingConvention = llvmCC; 548 FI->ASTCallingConvention = info.getCC(); 549 FI->InstanceMethod = instanceMethod; 550 FI->ChainCall = chainCall; 551 FI->NoReturn = info.getNoReturn(); 552 FI->ReturnsRetained = info.getProducesResult(); 553 FI->Required = required; 554 FI->HasRegParm = info.getHasRegParm(); 555 FI->RegParm = info.getRegParm(); 556 FI->ArgStruct = nullptr; 557 FI->NumArgs = argTypes.size(); 558 FI->getArgsBuffer()[0].type = resultType; 559 for (unsigned i = 0, e = argTypes.size(); i != e; ++i) 560 FI->getArgsBuffer()[i + 1].type = argTypes[i]; 561 return FI; 562 } 563 564 /***/ 565 566 namespace { 567 // ABIArgInfo::Expand implementation. 568 569 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded. 570 struct TypeExpansion { 571 enum TypeExpansionKind { 572 // Elements of constant arrays are expanded recursively. 573 TEK_ConstantArray, 574 // Record fields are expanded recursively (but if record is a union, only 575 // the field with the largest size is expanded). 576 TEK_Record, 577 // For complex types, real and imaginary parts are expanded recursively. 578 TEK_Complex, 579 // All other types are not expandable. 
580 TEK_None 581 }; 582 583 const TypeExpansionKind Kind; 584 585 TypeExpansion(TypeExpansionKind K) : Kind(K) {} 586 virtual ~TypeExpansion() {} 587 }; 588 589 struct ConstantArrayExpansion : TypeExpansion { 590 QualType EltTy; 591 uint64_t NumElts; 592 593 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts) 594 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {} 595 static bool classof(const TypeExpansion *TE) { 596 return TE->Kind == TEK_ConstantArray; 597 } 598 }; 599 600 struct RecordExpansion : TypeExpansion { 601 SmallVector<const CXXBaseSpecifier *, 1> Bases; 602 603 SmallVector<const FieldDecl *, 1> Fields; 604 605 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases, 606 SmallVector<const FieldDecl *, 1> &&Fields) 607 : TypeExpansion(TEK_Record), Bases(Bases), Fields(Fields) {} 608 static bool classof(const TypeExpansion *TE) { 609 return TE->Kind == TEK_Record; 610 } 611 }; 612 613 struct ComplexExpansion : TypeExpansion { 614 QualType EltTy; 615 616 ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {} 617 static bool classof(const TypeExpansion *TE) { 618 return TE->Kind == TEK_Complex; 619 } 620 }; 621 622 struct NoExpansion : TypeExpansion { 623 NoExpansion() : TypeExpansion(TEK_None) {} 624 static bool classof(const TypeExpansion *TE) { 625 return TE->Kind == TEK_None; 626 } 627 }; 628 } // namespace 629 630 static std::unique_ptr<TypeExpansion> 631 getTypeExpansion(QualType Ty, const ASTContext &Context) { 632 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 633 return llvm::make_unique<ConstantArrayExpansion>( 634 AT->getElementType(), AT->getSize().getZExtValue()); 635 } 636 if (const RecordType *RT = Ty->getAs<RecordType>()) { 637 SmallVector<const CXXBaseSpecifier *, 1> Bases; 638 SmallVector<const FieldDecl *, 1> Fields; 639 const RecordDecl *RD = RT->getDecl(); 640 assert(!RD->hasFlexibleArrayMember() && 641 "Cannot expand structure with flexible array."); 642 if (RD->isUnion()) { 643 // Unions can be here only in degenerative cases - all the fields are same 644 // after flattening. Thus we have to use the "largest" field. 645 const FieldDecl *LargestFD = nullptr; 646 CharUnits UnionSize = CharUnits::Zero(); 647 648 for (const auto *FD : RD->fields()) { 649 // Skip zero length bitfields. 650 if (FD->isBitField() && FD->getBitWidthValue(Context) == 0) 651 continue; 652 assert(!FD->isBitField() && 653 "Cannot expand structure with bit-field members."); 654 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType()); 655 if (UnionSize < FieldSize) { 656 UnionSize = FieldSize; 657 LargestFD = FD; 658 } 659 } 660 if (LargestFD) 661 Fields.push_back(LargestFD); 662 } else { 663 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 664 assert(!CXXRD->isDynamicClass() && 665 "cannot expand vtable pointers in dynamic classes"); 666 for (const CXXBaseSpecifier &BS : CXXRD->bases()) 667 Bases.push_back(&BS); 668 } 669 670 for (const auto *FD : RD->fields()) { 671 // Skip zero length bitfields. 
672 if (FD->isBitField() && FD->getBitWidthValue(Context) == 0) 673 continue; 674 assert(!FD->isBitField() && 675 "Cannot expand structure with bit-field members."); 676 Fields.push_back(FD); 677 } 678 } 679 return llvm::make_unique<RecordExpansion>(std::move(Bases), 680 std::move(Fields)); 681 } 682 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 683 return llvm::make_unique<ComplexExpansion>(CT->getElementType()); 684 } 685 return llvm::make_unique<NoExpansion>(); 686 } 687 688 static int getExpansionSize(QualType Ty, const ASTContext &Context) { 689 auto Exp = getTypeExpansion(Ty, Context); 690 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { 691 return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context); 692 } 693 if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { 694 int Res = 0; 695 for (auto BS : RExp->Bases) 696 Res += getExpansionSize(BS->getType(), Context); 697 for (auto FD : RExp->Fields) 698 Res += getExpansionSize(FD->getType(), Context); 699 return Res; 700 } 701 if (isa<ComplexExpansion>(Exp.get())) 702 return 2; 703 assert(isa<NoExpansion>(Exp.get())); 704 return 1; 705 } 706 707 void 708 CodeGenTypes::getExpandedTypes(QualType Ty, 709 SmallVectorImpl<llvm::Type *>::iterator &TI) { 710 auto Exp = getTypeExpansion(Ty, Context); 711 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { 712 for (int i = 0, n = CAExp->NumElts; i < n; i++) { 713 getExpandedTypes(CAExp->EltTy, TI); 714 } 715 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { 716 for (auto BS : RExp->Bases) 717 getExpandedTypes(BS->getType(), TI); 718 for (auto FD : RExp->Fields) 719 getExpandedTypes(FD->getType(), TI); 720 } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) { 721 llvm::Type *EltTy = ConvertType(CExp->EltTy); 722 *TI++ = EltTy; 723 *TI++ = EltTy; 724 } else { 725 assert(isa<NoExpansion>(Exp.get())); 726 *TI++ = ConvertType(Ty); 727 } 728 } 729 730 void CodeGenFunction::ExpandTypeFromArgs( 731 QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) { 732 assert(LV.isSimple() && 733 "Unexpected non-simple lvalue during struct expansion."); 734 735 auto Exp = getTypeExpansion(Ty, getContext()); 736 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { 737 for (int i = 0, n = CAExp->NumElts; i < n; i++) { 738 llvm::Value *EltAddr = 739 Builder.CreateConstGEP2_32(nullptr, LV.getAddress(), 0, i); 740 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy); 741 ExpandTypeFromArgs(CAExp->EltTy, LV, AI); 742 } 743 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { 744 llvm::Value *This = LV.getAddress(); 745 for (const CXXBaseSpecifier *BS : RExp->Bases) { 746 // Perform a single step derived-to-base conversion. 747 llvm::Value *Base = 748 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, 749 /*NullCheckValue=*/false, SourceLocation()); 750 LValue SubLV = MakeAddrLValue(Base, BS->getType()); 751 752 // Recurse onto bases. 753 ExpandTypeFromArgs(BS->getType(), SubLV, AI); 754 } 755 for (auto FD : RExp->Fields) { 756 // FIXME: What are the right qualifiers here? 
757 LValue SubLV = EmitLValueForField(LV, FD); 758 ExpandTypeFromArgs(FD->getType(), SubLV, AI); 759 } 760 } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) { 761 llvm::Value *RealAddr = 762 Builder.CreateStructGEP(nullptr, LV.getAddress(), 0, "real"); 763 EmitStoreThroughLValue(RValue::get(*AI++), 764 MakeAddrLValue(RealAddr, CExp->EltTy)); 765 llvm::Value *ImagAddr = 766 Builder.CreateStructGEP(nullptr, LV.getAddress(), 1, "imag"); 767 EmitStoreThroughLValue(RValue::get(*AI++), 768 MakeAddrLValue(ImagAddr, CExp->EltTy)); 769 } else { 770 assert(isa<NoExpansion>(Exp.get())); 771 EmitStoreThroughLValue(RValue::get(*AI++), LV); 772 } 773 } 774 775 void CodeGenFunction::ExpandTypeToArgs( 776 QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy, 777 SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) { 778 auto Exp = getTypeExpansion(Ty, getContext()); 779 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { 780 llvm::Value *Addr = RV.getAggregateAddr(); 781 for (int i = 0, n = CAExp->NumElts; i < n; i++) { 782 llvm::Value *EltAddr = Builder.CreateConstGEP2_32(nullptr, Addr, 0, i); 783 RValue EltRV = 784 convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()); 785 ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos); 786 } 787 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { 788 llvm::Value *This = RV.getAggregateAddr(); 789 for (const CXXBaseSpecifier *BS : RExp->Bases) { 790 // Perform a single step derived-to-base conversion. 791 llvm::Value *Base = 792 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, 793 /*NullCheckValue=*/false, SourceLocation()); 794 RValue BaseRV = RValue::getAggregate(Base); 795 796 // Recurse onto bases. 797 ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs, 798 IRCallArgPos); 799 } 800 801 LValue LV = MakeAddrLValue(This, Ty); 802 for (auto FD : RExp->Fields) { 803 RValue FldRV = EmitRValueForField(LV, FD, SourceLocation()); 804 ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs, 805 IRCallArgPos); 806 } 807 } else if (isa<ComplexExpansion>(Exp.get())) { 808 ComplexPairTy CV = RV.getComplexVal(); 809 IRCallArgs[IRCallArgPos++] = CV.first; 810 IRCallArgs[IRCallArgPos++] = CV.second; 811 } else { 812 assert(isa<NoExpansion>(Exp.get())); 813 assert(RV.isScalar() && 814 "Unexpected non-scalar rvalue during struct expansion."); 815 816 // Insert a bitcast as needed. 817 llvm::Value *V = RV.getScalarVal(); 818 if (IRCallArgPos < IRFuncTy->getNumParams() && 819 V->getType() != IRFuncTy->getParamType(IRCallArgPos)) 820 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos)); 821 822 IRCallArgs[IRCallArgPos++] = V; 823 } 824 } 825 826 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are 827 /// accessing some number of bytes out of it, try to gep into the struct to get 828 /// at its inner goodness. Dive as deep as possible without entering an element 829 /// with an in-memory size smaller than DstSize. 830 static llvm::Value * 831 EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr, 832 llvm::StructType *SrcSTy, 833 uint64_t DstSize, CodeGenFunction &CGF) { 834 // We can't dive into a zero-element struct. 835 if (SrcSTy->getNumElements() == 0) return SrcPtr; 836 837 llvm::Type *FirstElt = SrcSTy->getElementType(0); 838 839 // If the first elt is at least as large as what we're looking for, or if the 840 // first element is the same size as the whole struct, we can enter it. 
The 841 // comparison must be made on the store size and not the alloca size. Using 842 // the alloca size may overstate the size of the load. 843 uint64_t FirstEltSize = 844 CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt); 845 if (FirstEltSize < DstSize && 846 FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy)) 847 return SrcPtr; 848 849 // GEP into the first element. 850 SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcSTy, SrcPtr, 0, 0, "coerce.dive"); 851 852 // If the first element is a struct, recurse. 853 llvm::Type *SrcTy = 854 cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); 855 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) 856 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF); 857 858 return SrcPtr; 859 } 860 861 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both 862 /// are either integers or pointers. This does a truncation of the value if it 863 /// is too large or a zero extension if it is too small. 864 /// 865 /// This behaves as if the value were coerced through memory, so on big-endian 866 /// targets the high bits are preserved in a truncation, while little-endian 867 /// targets preserve the low bits. 868 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, 869 llvm::Type *Ty, 870 CodeGenFunction &CGF) { 871 if (Val->getType() == Ty) 872 return Val; 873 874 if (isa<llvm::PointerType>(Val->getType())) { 875 // If this is Pointer->Pointer avoid conversion to and from int. 876 if (isa<llvm::PointerType>(Ty)) 877 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val"); 878 879 // Convert the pointer to an integer so we can play with its width. 880 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi"); 881 } 882 883 llvm::Type *DestIntTy = Ty; 884 if (isa<llvm::PointerType>(DestIntTy)) 885 DestIntTy = CGF.IntPtrTy; 886 887 if (Val->getType() != DestIntTy) { 888 const llvm::DataLayout &DL = CGF.CGM.getDataLayout(); 889 if (DL.isBigEndian()) { 890 // Preserve the high bits on big-endian targets. 891 // That is what memory coercion does. 892 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType()); 893 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy); 894 895 if (SrcSize > DstSize) { 896 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits"); 897 Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii"); 898 } else { 899 Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii"); 900 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits"); 901 } 902 } else { 903 // Little-endian targets preserve the low bits. No shifts required. 904 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii"); 905 } 906 } 907 908 if (isa<llvm::PointerType>(Ty)) 909 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip"); 910 return Val; 911 } 912 913 914 915 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as 916 /// a pointer to an object of type \arg Ty. 917 /// 918 /// This safely handles the case when the src type is smaller than the 919 /// destination type; in this situation the values of bits which not 920 /// present in the src are undefined. 921 static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr, 922 llvm::Type *Ty, 923 CodeGenFunction &CGF) { 924 llvm::Type *SrcTy = 925 cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); 926 927 // If SrcTy and Ty are the same, just do a load. 
928 if (SrcTy == Ty) 929 return CGF.Builder.CreateLoad(SrcPtr); 930 931 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty); 932 933 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) { 934 SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF); 935 SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); 936 } 937 938 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); 939 940 // If the source and destination are integer or pointer types, just do an 941 // extension or truncation to the desired type. 942 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) && 943 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) { 944 llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr); 945 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF); 946 } 947 948 // If load is legal, just bitcast the src pointer. 949 if (SrcSize >= DstSize) { 950 // Generally SrcSize is never greater than DstSize, since this means we are 951 // losing bits. However, this can happen in cases where the structure has 952 // additional padding, for example due to a user specified alignment. 953 // 954 // FIXME: Assert that we aren't truncating non-padding bits when have access 955 // to that information. 956 llvm::Value *Casted = 957 CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty)); 958 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted); 959 // FIXME: Use better alignment / avoid requiring aligned load. 960 Load->setAlignment(1); 961 return Load; 962 } 963 964 // Otherwise do coercion through memory. This is stupid, but 965 // simple. 966 llvm::Value *Tmp = CGF.CreateTempAlloca(Ty); 967 llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy(); 968 llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy); 969 llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy); 970 // FIXME: Use better alignment. 971 CGF.Builder.CreateMemCpy(Casted, SrcCasted, 972 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize), 973 1, false); 974 return CGF.Builder.CreateLoad(Tmp); 975 } 976 977 // Function to store a first-class aggregate into memory. We prefer to 978 // store the elements rather than the aggregate to be more friendly to 979 // fast-isel. 980 // FIXME: Do we need to recurse here? 981 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val, 982 llvm::Value *DestPtr, bool DestIsVolatile, 983 bool LowAlignment) { 984 // Prefer scalar stores to first-class aggregate stores. 985 if (llvm::StructType *STy = 986 dyn_cast<llvm::StructType>(Val->getType())) { 987 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 988 llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(STy, DestPtr, 0, i); 989 llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i); 990 llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr, 991 DestIsVolatile); 992 if (LowAlignment) 993 SI->setAlignment(1); 994 } 995 } else { 996 llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile); 997 if (LowAlignment) 998 SI->setAlignment(1); 999 } 1000 } 1001 1002 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src, 1003 /// where the source and destination may have different types. 1004 /// 1005 /// This safely handles the case when the src type is larger than the 1006 /// destination type; the upper bits of the src will be lost. 
1007 static void CreateCoercedStore(llvm::Value *Src, 1008 llvm::Value *DstPtr, 1009 bool DstIsVolatile, 1010 CodeGenFunction &CGF) { 1011 llvm::Type *SrcTy = Src->getType(); 1012 llvm::Type *DstTy = 1013 cast<llvm::PointerType>(DstPtr->getType())->getElementType(); 1014 if (SrcTy == DstTy) { 1015 CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile); 1016 return; 1017 } 1018 1019 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); 1020 1021 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) { 1022 DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF); 1023 DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType(); 1024 } 1025 1026 // If the source and destination are integer or pointer types, just do an 1027 // extension or truncation to the desired type. 1028 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) && 1029 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) { 1030 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF); 1031 CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile); 1032 return; 1033 } 1034 1035 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy); 1036 1037 // If store is legal, just bitcast the src pointer. 1038 if (SrcSize <= DstSize) { 1039 llvm::Value *Casted = 1040 CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy)); 1041 // FIXME: Use better alignment / avoid requiring aligned store. 1042 BuildAggStore(CGF, Src, Casted, DstIsVolatile, true); 1043 } else { 1044 // Otherwise do coercion through memory. This is stupid, but 1045 // simple. 1046 1047 // Generally SrcSize is never greater than DstSize, since this means we are 1048 // losing bits. However, this can happen in cases where the structure has 1049 // additional padding, for example due to a user specified alignment. 1050 // 1051 // FIXME: Assert that we aren't truncating non-padding bits when have access 1052 // to that information. 1053 llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy); 1054 CGF.Builder.CreateStore(Src, Tmp); 1055 llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy(); 1056 llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy); 1057 llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy); 1058 // FIXME: Use better alignment. 1059 CGF.Builder.CreateMemCpy(DstCasted, Casted, 1060 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize), 1061 1, false); 1062 } 1063 } 1064 1065 namespace { 1066 1067 /// Encapsulates information about the way function arguments from 1068 /// CGFunctionInfo should be passed to actual LLVM IR function. 1069 class ClangToLLVMArgMapping { 1070 static const unsigned InvalidIndex = ~0U; 1071 unsigned InallocaArgNo; 1072 unsigned SRetArgNo; 1073 unsigned TotalIRArgs; 1074 1075 /// Arguments of LLVM IR function corresponding to single Clang argument. 1076 struct IRArgs { 1077 unsigned PaddingArgIndex; 1078 // Argument is expanded to IR arguments at positions 1079 // [FirstArgIndex, FirstArgIndex + NumberOfArgs). 1080 unsigned FirstArgIndex; 1081 unsigned NumberOfArgs; 1082 1083 IRArgs() 1084 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex), 1085 NumberOfArgs(0) {} 1086 }; 1087 1088 SmallVector<IRArgs, 8> ArgInfo; 1089 1090 public: 1091 ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI, 1092 bool OnlyRequiredArgs = false) 1093 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0), 1094 ArgInfo(OnlyRequiredArgs ? 
FI.getNumRequiredArgs() : FI.arg_size()) { 1095 construct(Context, FI, OnlyRequiredArgs); 1096 } 1097 1098 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; } 1099 unsigned getInallocaArgNo() const { 1100 assert(hasInallocaArg()); 1101 return InallocaArgNo; 1102 } 1103 1104 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; } 1105 unsigned getSRetArgNo() const { 1106 assert(hasSRetArg()); 1107 return SRetArgNo; 1108 } 1109 1110 unsigned totalIRArgs() const { return TotalIRArgs; } 1111 1112 bool hasPaddingArg(unsigned ArgNo) const { 1113 assert(ArgNo < ArgInfo.size()); 1114 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; 1115 } 1116 unsigned getPaddingArgNo(unsigned ArgNo) const { 1117 assert(hasPaddingArg(ArgNo)); 1118 return ArgInfo[ArgNo].PaddingArgIndex; 1119 } 1120 1121 /// Returns index of first IR argument corresponding to ArgNo, and their 1122 /// quantity. 1123 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const { 1124 assert(ArgNo < ArgInfo.size()); 1125 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, 1126 ArgInfo[ArgNo].NumberOfArgs); 1127 } 1128 1129 private: 1130 void construct(const ASTContext &Context, const CGFunctionInfo &FI, 1131 bool OnlyRequiredArgs); 1132 }; 1133 1134 void ClangToLLVMArgMapping::construct(const ASTContext &Context, 1135 const CGFunctionInfo &FI, 1136 bool OnlyRequiredArgs) { 1137 unsigned IRArgNo = 0; 1138 bool SwapThisWithSRet = false; 1139 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1140 1141 if (RetAI.getKind() == ABIArgInfo::Indirect) { 1142 SwapThisWithSRet = RetAI.isSRetAfterThis(); 1143 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++; 1144 } 1145 1146 unsigned ArgNo = 0; 1147 unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); 1148 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; 1149 ++I, ++ArgNo) { 1150 assert(I != FI.arg_end()); 1151 QualType ArgType = I->type; 1152 const ABIArgInfo &AI = I->info; 1153 // Collect data about IR arguments corresponding to Clang argument ArgNo. 1154 auto &IRArgs = ArgInfo[ArgNo]; 1155 1156 if (AI.getPaddingType()) 1157 IRArgs.PaddingArgIndex = IRArgNo++; 1158 1159 switch (AI.getKind()) { 1160 case ABIArgInfo::Extend: 1161 case ABIArgInfo::Direct: { 1162 // FIXME: handle sseregparm someday... 1163 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType()); 1164 if (AI.isDirect() && AI.getCanBeFlattened() && STy) { 1165 IRArgs.NumberOfArgs = STy->getNumElements(); 1166 } else { 1167 IRArgs.NumberOfArgs = 1; 1168 } 1169 break; 1170 } 1171 case ABIArgInfo::Indirect: 1172 IRArgs.NumberOfArgs = 1; 1173 break; 1174 case ABIArgInfo::Ignore: 1175 case ABIArgInfo::InAlloca: 1176 // ignore and inalloca doesn't have matching LLVM parameters. 1177 IRArgs.NumberOfArgs = 0; 1178 break; 1179 case ABIArgInfo::Expand: { 1180 IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context); 1181 break; 1182 } 1183 } 1184 1185 if (IRArgs.NumberOfArgs > 0) { 1186 IRArgs.FirstArgIndex = IRArgNo; 1187 IRArgNo += IRArgs.NumberOfArgs; 1188 } 1189 1190 // Skip over the sret parameter when it comes second. We already handled it 1191 // above. 
1192 if (IRArgNo == 1 && SwapThisWithSRet) 1193 IRArgNo++; 1194 } 1195 assert(ArgNo == ArgInfo.size()); 1196 1197 if (FI.usesInAlloca()) 1198 InallocaArgNo = IRArgNo++; 1199 1200 TotalIRArgs = IRArgNo; 1201 } 1202 } // namespace 1203 1204 /***/ 1205 1206 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { 1207 return FI.getReturnInfo().isIndirect(); 1208 } 1209 1210 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) { 1211 return ReturnTypeUsesSRet(FI) && 1212 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs(); 1213 } 1214 1215 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { 1216 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { 1217 switch (BT->getKind()) { 1218 default: 1219 return false; 1220 case BuiltinType::Float: 1221 return getTarget().useObjCFPRetForRealType(TargetInfo::Float); 1222 case BuiltinType::Double: 1223 return getTarget().useObjCFPRetForRealType(TargetInfo::Double); 1224 case BuiltinType::LongDouble: 1225 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble); 1226 } 1227 } 1228 1229 return false; 1230 } 1231 1232 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { 1233 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { 1234 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { 1235 if (BT->getKind() == BuiltinType::LongDouble) 1236 return getTarget().useObjCFP2RetForComplexLongDouble(); 1237 } 1238 } 1239 1240 return false; 1241 } 1242 1243 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { 1244 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); 1245 return GetFunctionType(FI); 1246 } 1247 1248 llvm::FunctionType * 1249 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { 1250 1251 bool Inserted = FunctionsBeingProcessed.insert(&FI).second; 1252 (void)Inserted; 1253 assert(Inserted && "Recursively being processed?"); 1254 1255 llvm::Type *resultType = nullptr; 1256 const ABIArgInfo &retAI = FI.getReturnInfo(); 1257 switch (retAI.getKind()) { 1258 case ABIArgInfo::Expand: 1259 llvm_unreachable("Invalid ABI kind for return argument"); 1260 1261 case ABIArgInfo::Extend: 1262 case ABIArgInfo::Direct: 1263 resultType = retAI.getCoerceToType(); 1264 break; 1265 1266 case ABIArgInfo::InAlloca: 1267 if (retAI.getInAllocaSRet()) { 1268 // sret things on win32 aren't void, they return the sret pointer. 1269 QualType ret = FI.getReturnType(); 1270 llvm::Type *ty = ConvertType(ret); 1271 unsigned addressSpace = Context.getTargetAddressSpace(ret); 1272 resultType = llvm::PointerType::get(ty, addressSpace); 1273 } else { 1274 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1275 } 1276 break; 1277 1278 case ABIArgInfo::Indirect: { 1279 assert(!retAI.getIndirectAlign() && "Align unused on indirect return."); 1280 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1281 break; 1282 } 1283 1284 case ABIArgInfo::Ignore: 1285 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1286 break; 1287 } 1288 1289 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); 1290 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); 1291 1292 // Add type for sret argument. 1293 if (IRFunctionArgs.hasSRetArg()) { 1294 QualType Ret = FI.getReturnType(); 1295 llvm::Type *Ty = ConvertType(Ret); 1296 unsigned AddressSpace = Context.getTargetAddressSpace(Ret); 1297 ArgTypes[IRFunctionArgs.getSRetArgNo()] = 1298 llvm::PointerType::get(Ty, AddressSpace); 1299 } 1300 1301 // Add type for inalloca argument. 
1302 if (IRFunctionArgs.hasInallocaArg()) { 1303 auto ArgStruct = FI.getArgStruct(); 1304 assert(ArgStruct); 1305 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); 1306 } 1307 1308 // Add in all of the required arguments. 1309 unsigned ArgNo = 0; 1310 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 1311 ie = it + FI.getNumRequiredArgs(); 1312 for (; it != ie; ++it, ++ArgNo) { 1313 const ABIArgInfo &ArgInfo = it->info; 1314 1315 // Insert a padding type to ensure proper alignment. 1316 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 1317 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 1318 ArgInfo.getPaddingType(); 1319 1320 unsigned FirstIRArg, NumIRArgs; 1321 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1322 1323 switch (ArgInfo.getKind()) { 1324 case ABIArgInfo::Ignore: 1325 case ABIArgInfo::InAlloca: 1326 assert(NumIRArgs == 0); 1327 break; 1328 1329 case ABIArgInfo::Indirect: { 1330 assert(NumIRArgs == 1); 1331 // indirect arguments are always on the stack, which is addr space #0. 1332 llvm::Type *LTy = ConvertTypeForMem(it->type); 1333 ArgTypes[FirstIRArg] = LTy->getPointerTo(); 1334 break; 1335 } 1336 1337 case ABIArgInfo::Extend: 1338 case ABIArgInfo::Direct: { 1339 // Fast-isel and the optimizer generally like scalar values better than 1340 // FCAs, so we flatten them if this is safe to do for this argument. 1341 llvm::Type *argType = ArgInfo.getCoerceToType(); 1342 llvm::StructType *st = dyn_cast<llvm::StructType>(argType); 1343 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 1344 assert(NumIRArgs == st->getNumElements()); 1345 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) 1346 ArgTypes[FirstIRArg + i] = st->getElementType(i); 1347 } else { 1348 assert(NumIRArgs == 1); 1349 ArgTypes[FirstIRArg] = argType; 1350 } 1351 break; 1352 } 1353 1354 case ABIArgInfo::Expand: 1355 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1356 getExpandedTypes(it->type, ArgTypesIter); 1357 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1358 break; 1359 } 1360 } 1361 1362 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; 1363 assert(Erased && "Not in set?"); 1364 1365 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic()); 1366 } 1367 1368 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { 1369 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 1370 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 1371 1372 if (!isFuncTypeConvertible(FPT)) 1373 return llvm::StructType::get(getLLVMContext()); 1374 1375 const CGFunctionInfo *Info; 1376 if (isa<CXXDestructorDecl>(MD)) 1377 Info = 1378 &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType())); 1379 else 1380 Info = &arrangeCXXMethodDeclaration(MD); 1381 return GetFunctionType(*Info); 1382 } 1383 1384 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, 1385 const Decl *TargetDecl, 1386 AttributeListType &PAL, 1387 unsigned &CallingConv, 1388 bool AttrOnCallSite) { 1389 llvm::AttrBuilder FuncAttrs; 1390 llvm::AttrBuilder RetAttrs; 1391 bool HasOptnone = false; 1392 1393 CallingConv = FI.getEffectiveCallingConvention(); 1394 1395 if (FI.isNoReturn()) 1396 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1397 1398 // FIXME: handle sseregparm someday... 
1399 if (TargetDecl) { 1400 if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) 1401 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice); 1402 if (TargetDecl->hasAttr<NoThrowAttr>()) 1403 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1404 if (TargetDecl->hasAttr<NoReturnAttr>()) 1405 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1406 if (TargetDecl->hasAttr<NoDuplicateAttr>()) 1407 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate); 1408 1409 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { 1410 const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>(); 1411 if (FPT && FPT->isNothrow(getContext())) 1412 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1413 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function. 1414 // These attributes are not inherited by overloads. 1415 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn); 1416 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual())) 1417 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1418 } 1419 1420 // 'const' and 'pure' attribute functions are also nounwind. 1421 if (TargetDecl->hasAttr<ConstAttr>()) { 1422 FuncAttrs.addAttribute(llvm::Attribute::ReadNone); 1423 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1424 } else if (TargetDecl->hasAttr<PureAttr>()) { 1425 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly); 1426 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1427 } 1428 if (TargetDecl->hasAttr<RestrictAttr>()) 1429 RetAttrs.addAttribute(llvm::Attribute::NoAlias); 1430 if (TargetDecl->hasAttr<ReturnsNonNullAttr>()) 1431 RetAttrs.addAttribute(llvm::Attribute::NonNull); 1432 1433 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>(); 1434 } 1435 1436 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. 1437 if (!HasOptnone) { 1438 if (CodeGenOpts.OptimizeSize) 1439 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); 1440 if (CodeGenOpts.OptimizeSize == 2) 1441 FuncAttrs.addAttribute(llvm::Attribute::MinSize); 1442 } 1443 1444 if (CodeGenOpts.DisableRedZone) 1445 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); 1446 if (CodeGenOpts.NoImplicitFloat) 1447 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); 1448 if (CodeGenOpts.EnableSegmentedStacks && 1449 !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>())) 1450 FuncAttrs.addAttribute("split-stack"); 1451 1452 if (AttrOnCallSite) { 1453 // Attributes that should go on the call site only. 1454 if (!CodeGenOpts.SimplifyLibCalls) 1455 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); 1456 } else { 1457 // Attributes that should go on the function, but not the call site. 
1458 if (!CodeGenOpts.DisableFPElim) { 1459 FuncAttrs.addAttribute("no-frame-pointer-elim", "false"); 1460 } else if (CodeGenOpts.OmitLeafFramePointer) { 1461 FuncAttrs.addAttribute("no-frame-pointer-elim", "false"); 1462 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf"); 1463 } else { 1464 FuncAttrs.addAttribute("no-frame-pointer-elim", "true"); 1465 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf"); 1466 } 1467 1468 FuncAttrs.addAttribute("less-precise-fpmad", 1469 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD)); 1470 FuncAttrs.addAttribute("no-infs-fp-math", 1471 llvm::toStringRef(CodeGenOpts.NoInfsFPMath)); 1472 FuncAttrs.addAttribute("no-nans-fp-math", 1473 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath)); 1474 FuncAttrs.addAttribute("unsafe-fp-math", 1475 llvm::toStringRef(CodeGenOpts.UnsafeFPMath)); 1476 FuncAttrs.addAttribute("use-soft-float", 1477 llvm::toStringRef(CodeGenOpts.SoftFloat)); 1478 FuncAttrs.addAttribute("stack-protector-buffer-size", 1479 llvm::utostr(CodeGenOpts.SSPBufferSize)); 1480 1481 if (!CodeGenOpts.StackRealignment) 1482 FuncAttrs.addAttribute("no-realign-stack"); 1483 1484 // Add target-cpu and target-features work if they differ from the defaults. 1485 std::string &CPU = getTarget().getTargetOpts().CPU; 1486 if (CPU != "") 1487 FuncAttrs.addAttribute("target-cpu", CPU); 1488 1489 // TODO: Features gets us the features on the command line including 1490 // feature dependencies. For canonicalization purposes we might want to 1491 // avoid putting features in the target-features set if we know it'll be one 1492 // of the default features in the backend, e.g. corei7-avx and +avx or figure 1493 // out non-explicit dependencies. 1494 std::vector<std::string> &Features = getTarget().getTargetOpts().Features; 1495 if (!Features.empty()) { 1496 std::stringstream S; 1497 std::copy(Features.begin(), Features.end(), 1498 std::ostream_iterator<std::string>(S, ",")); 1499 // The drop_back gets rid of the trailing space. 1500 FuncAttrs.addAttribute("target-features", 1501 StringRef(S.str()).drop_back(1)); 1502 } 1503 } 1504 1505 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); 1506 1507 QualType RetTy = FI.getReturnType(); 1508 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1509 switch (RetAI.getKind()) { 1510 case ABIArgInfo::Extend: 1511 if (RetTy->hasSignedIntegerRepresentation()) 1512 RetAttrs.addAttribute(llvm::Attribute::SExt); 1513 else if (RetTy->hasUnsignedIntegerRepresentation()) 1514 RetAttrs.addAttribute(llvm::Attribute::ZExt); 1515 // FALL THROUGH 1516 case ABIArgInfo::Direct: 1517 if (RetAI.getInReg()) 1518 RetAttrs.addAttribute(llvm::Attribute::InReg); 1519 break; 1520 case ABIArgInfo::Ignore: 1521 break; 1522 1523 case ABIArgInfo::InAlloca: 1524 case ABIArgInfo::Indirect: { 1525 // inalloca and sret disable readnone and readonly 1526 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1527 .removeAttribute(llvm::Attribute::ReadNone); 1528 break; 1529 } 1530 1531 case ABIArgInfo::Expand: 1532 llvm_unreachable("Invalid ABI kind for return argument"); 1533 } 1534 1535 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { 1536 QualType PTy = RefTy->getPointeeType(); 1537 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 1538 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 1539 .getQuantity()); 1540 else if (getContext().getTargetAddressSpace(PTy) == 0) 1541 RetAttrs.addAttribute(llvm::Attribute::NonNull); 1542 } 1543 1544 // Attach return attributes. 
1545 if (RetAttrs.hasAttributes()) { 1546 PAL.push_back(llvm::AttributeSet::get( 1547 getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs)); 1548 } 1549 1550 // Attach attributes to sret. 1551 if (IRFunctionArgs.hasSRetArg()) { 1552 llvm::AttrBuilder SRETAttrs; 1553 SRETAttrs.addAttribute(llvm::Attribute::StructRet); 1554 if (RetAI.getInReg()) 1555 SRETAttrs.addAttribute(llvm::Attribute::InReg); 1556 PAL.push_back(llvm::AttributeSet::get( 1557 getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs)); 1558 } 1559 1560 // Attach attributes to inalloca argument. 1561 if (IRFunctionArgs.hasInallocaArg()) { 1562 llvm::AttrBuilder Attrs; 1563 Attrs.addAttribute(llvm::Attribute::InAlloca); 1564 PAL.push_back(llvm::AttributeSet::get( 1565 getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs)); 1566 } 1567 1568 unsigned ArgNo = 0; 1569 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 1570 E = FI.arg_end(); 1571 I != E; ++I, ++ArgNo) { 1572 QualType ParamType = I->type; 1573 const ABIArgInfo &AI = I->info; 1574 llvm::AttrBuilder Attrs; 1575 1576 // Add attribute for padding argument, if necessary. 1577 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 1578 if (AI.getPaddingInReg()) 1579 PAL.push_back(llvm::AttributeSet::get( 1580 getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1, 1581 llvm::Attribute::InReg)); 1582 } 1583 1584 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 1585 // have the corresponding parameter variable. It doesn't make 1586 // sense to do it here because parameters are so messed up. 1587 switch (AI.getKind()) { 1588 case ABIArgInfo::Extend: 1589 if (ParamType->isSignedIntegerOrEnumerationType()) 1590 Attrs.addAttribute(llvm::Attribute::SExt); 1591 else if (ParamType->isUnsignedIntegerOrEnumerationType()) 1592 Attrs.addAttribute(llvm::Attribute::ZExt); 1593 // FALL THROUGH 1594 case ABIArgInfo::Direct: 1595 if (ArgNo == 0 && FI.isChainCall()) 1596 Attrs.addAttribute(llvm::Attribute::Nest); 1597 else if (AI.getInReg()) 1598 Attrs.addAttribute(llvm::Attribute::InReg); 1599 break; 1600 1601 case ABIArgInfo::Indirect: 1602 if (AI.getInReg()) 1603 Attrs.addAttribute(llvm::Attribute::InReg); 1604 1605 if (AI.getIndirectByVal()) 1606 Attrs.addAttribute(llvm::Attribute::ByVal); 1607 1608 Attrs.addAlignmentAttr(AI.getIndirectAlign()); 1609 1610 // byval disables readnone and readonly. 1611 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1612 .removeAttribute(llvm::Attribute::ReadNone); 1613 break; 1614 1615 case ABIArgInfo::Ignore: 1616 case ABIArgInfo::Expand: 1617 continue; 1618 1619 case ABIArgInfo::InAlloca: 1620 // inalloca disables readnone and readonly. 
1621 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1622 .removeAttribute(llvm::Attribute::ReadNone); 1623 continue; 1624 } 1625 1626 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { 1627 QualType PTy = RefTy->getPointeeType(); 1628 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 1629 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 1630 .getQuantity()); 1631 else if (getContext().getTargetAddressSpace(PTy) == 0) 1632 Attrs.addAttribute(llvm::Attribute::NonNull); 1633 } 1634 1635 if (Attrs.hasAttributes()) { 1636 unsigned FirstIRArg, NumIRArgs; 1637 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1638 for (unsigned i = 0; i < NumIRArgs; i++) 1639 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), 1640 FirstIRArg + i + 1, Attrs)); 1641 } 1642 } 1643 assert(ArgNo == FI.arg_size()); 1644 1645 if (FuncAttrs.hasAttributes()) 1646 PAL.push_back(llvm:: 1647 AttributeSet::get(getLLVMContext(), 1648 llvm::AttributeSet::FunctionIndex, 1649 FuncAttrs)); 1650 } 1651 1652 /// An argument came in as a promoted argument; demote it back to its 1653 /// declared type. 1654 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 1655 const VarDecl *var, 1656 llvm::Value *value) { 1657 llvm::Type *varType = CGF.ConvertType(var->getType()); 1658 1659 // This can happen with promotions that actually don't change the 1660 // underlying type, like the enum promotions. 1661 if (value->getType() == varType) return value; 1662 1663 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 1664 && "unexpected promotion type"); 1665 1666 if (isa<llvm::IntegerType>(varType)) 1667 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 1668 1669 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 1670 } 1671 1672 /// Returns the attribute (either parameter attribute, or function 1673 /// attribute), which declares argument ArgNo to be non-null. 1674 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, 1675 QualType ArgType, unsigned ArgNo) { 1676 // FIXME: __attribute__((nonnull)) can also be applied to: 1677 // - references to pointers, where the pointee is known to be 1678 // nonnull (apparently a Clang extension) 1679 // - transparent unions containing pointers 1680 // In the former case, LLVM IR cannot represent the constraint. In 1681 // the latter case, we have no guarantee that the transparent union 1682 // is in fact passed as a pointer. 1683 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) 1684 return nullptr; 1685 // First, check attribute on parameter itself. 1686 if (PVD) { 1687 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) 1688 return ParmNNAttr; 1689 } 1690 // Check function attributes. 1691 if (!FD) 1692 return nullptr; 1693 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) { 1694 if (NNAttr->isNonNull(ArgNo)) 1695 return NNAttr; 1696 } 1697 return nullptr; 1698 } 1699 1700 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, 1701 llvm::Function *Fn, 1702 const FunctionArgList &Args) { 1703 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) 1704 // Naked functions don't have prologues. 1705 return; 1706 1707 // If this is an implicit-return-zero function, go ahead and 1708 // initialize the return value. TODO: it might be nice to have 1709 // a more general mechanism for this that didn't require synthesized 1710 // return statements. 
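// The common case here is main(): C and C++ allow control to flow off the end
// of main(), so it is marked with hasImplicitReturnZero() and we pre-store a
// zero into the return slot.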
1711 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) { 1712 if (FD->hasImplicitReturnZero()) { 1713 QualType RetTy = FD->getReturnType().getUnqualifiedType(); 1714 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy); 1715 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy); 1716 Builder.CreateStore(Zero, ReturnValue); 1717 } 1718 } 1719 1720 // FIXME: We no longer need the types from FunctionArgList; lift up and 1721 // simplify. 1722 1723 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI); 1724 // Flattened function arguments. 1725 SmallVector<llvm::Argument *, 16> FnArgs; 1726 FnArgs.reserve(IRFunctionArgs.totalIRArgs()); 1727 for (auto &Arg : Fn->args()) { 1728 FnArgs.push_back(&Arg); 1729 } 1730 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs()); 1731 1732 // If we're using inalloca, all the memory arguments are GEPs off of the last 1733 // parameter, which is a pointer to the complete memory area. 1734 llvm::Value *ArgStruct = nullptr; 1735 if (IRFunctionArgs.hasInallocaArg()) { 1736 ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()]; 1737 assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo()); 1738 } 1739 1740 // Name the struct return parameter. 1741 if (IRFunctionArgs.hasSRetArg()) { 1742 auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()]; 1743 AI->setName("agg.result"); 1744 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1, 1745 llvm::Attribute::NoAlias)); 1746 } 1747 1748 // Track if we received the parameter as a pointer (indirect, byval, or 1749 // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it 1750 // into a local alloca for us. 1751 enum ValOrPointer { HaveValue = 0, HavePointer = 1 }; 1752 typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr; 1753 SmallVector<ValueAndIsPtr, 16> ArgVals; 1754 ArgVals.reserve(Args.size()); 1755 1756 // Create a pointer value for every parameter declaration. This usually 1757 // entails copying one or more LLVM IR arguments into an alloca. Don't push 1758 // any cleanups or do anything that might unwind. We do that separately, so 1759 // we can push the cleanups in the correct order for the ABI. 1760 assert(FI.arg_size() == Args.size() && 1761 "Mismatch between function signature & arguments."); 1762 unsigned ArgNo = 0; 1763 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); 1764 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 1765 i != e; ++i, ++info_it, ++ArgNo) { 1766 const VarDecl *Arg = *i; 1767 QualType Ty = info_it->type; 1768 const ABIArgInfo &ArgI = info_it->info; 1769 1770 bool isPromoted = 1771 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted(); 1772 1773 unsigned FirstIRArg, NumIRArgs; 1774 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1775 1776 switch (ArgI.getKind()) { 1777 case ABIArgInfo::InAlloca: { 1778 assert(NumIRArgs == 0); 1779 llvm::Value *V = 1780 Builder.CreateStructGEP(FI.getArgStruct(), ArgStruct, 1781 ArgI.getInAllocaFieldIndex(), Arg->getName()); 1782 ArgVals.push_back(ValueAndIsPtr(V, HavePointer)); 1783 break; 1784 } 1785 1786 case ABIArgInfo::Indirect: { 1787 assert(NumIRArgs == 1); 1788 llvm::Value *V = FnArgs[FirstIRArg]; 1789 1790 if (!hasScalarEvaluationKind(Ty)) { 1791 // Aggregates and complex variables are accessed by reference. 
All we 1792 // need to do is realign the value, if requested 1793 if (ArgI.getIndirectRealign()) { 1794 llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce"); 1795 1796 // Copy from the incoming argument pointer to the temporary with the 1797 // appropriate alignment. 1798 // 1799 // FIXME: We should have a common utility for generating an aggregate 1800 // copy. 1801 llvm::Type *I8PtrTy = Builder.getInt8PtrTy(); 1802 CharUnits Size = getContext().getTypeSizeInChars(Ty); 1803 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy); 1804 llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy); 1805 Builder.CreateMemCpy(Dst, 1806 Src, 1807 llvm::ConstantInt::get(IntPtrTy, 1808 Size.getQuantity()), 1809 ArgI.getIndirectAlign(), 1810 false); 1811 V = AlignedTemp; 1812 } 1813 ArgVals.push_back(ValueAndIsPtr(V, HavePointer)); 1814 } else { 1815 // Load scalar value from indirect argument. 1816 V = EmitLoadOfScalar(V, false, ArgI.getIndirectAlign(), Ty, 1817 Arg->getLocStart()); 1818 1819 if (isPromoted) 1820 V = emitArgumentDemotion(*this, Arg, V); 1821 ArgVals.push_back(ValueAndIsPtr(V, HaveValue)); 1822 } 1823 break; 1824 } 1825 1826 case ABIArgInfo::Extend: 1827 case ABIArgInfo::Direct: { 1828 1829 // If we have the trivial case, handle it with no muss and fuss. 1830 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 1831 ArgI.getCoerceToType() == ConvertType(Ty) && 1832 ArgI.getDirectOffset() == 0) { 1833 assert(NumIRArgs == 1); 1834 auto AI = FnArgs[FirstIRArg]; 1835 llvm::Value *V = AI; 1836 1837 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) { 1838 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), 1839 PVD->getFunctionScopeIndex())) 1840 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1841 AI->getArgNo() + 1, 1842 llvm::Attribute::NonNull)); 1843 1844 QualType OTy = PVD->getOriginalType(); 1845 if (const auto *ArrTy = 1846 getContext().getAsConstantArrayType(OTy)) { 1847 // A C99 array parameter declaration with the static keyword also 1848 // indicates dereferenceability, and if the size is constant we can 1849 // use the dereferenceable attribute (which requires the size in 1850 // bytes). 1851 if (ArrTy->getSizeModifier() == ArrayType::Static) { 1852 QualType ETy = ArrTy->getElementType(); 1853 uint64_t ArrSize = ArrTy->getSize().getZExtValue(); 1854 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && 1855 ArrSize) { 1856 llvm::AttrBuilder Attrs; 1857 Attrs.addDereferenceableAttr( 1858 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize); 1859 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1860 AI->getArgNo() + 1, Attrs)); 1861 } else if (getContext().getTargetAddressSpace(ETy) == 0) { 1862 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1863 AI->getArgNo() + 1, 1864 llvm::Attribute::NonNull)); 1865 } 1866 } 1867 } else if (const auto *ArrTy = 1868 getContext().getAsVariableArrayType(OTy)) { 1869 // For C99 VLAs with the static keyword, we don't know the size so 1870 // we can't use the dereferenceable attribute, but in addrspace(0) 1871 // we know that it must be nonnull. 
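// Illustrative examples (assuming a 4-byte int):
//   void f(int a[static 8]);        // handled above: parameter gets dereferenceable(32)
//   void g(int n, int a[static n]); // handled here: size unknown, so only nonnull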
1872 if (ArrTy->getSizeModifier() == VariableArrayType::Static && 1873 !getContext().getTargetAddressSpace(ArrTy->getElementType())) 1874 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1875 AI->getArgNo() + 1, 1876 llvm::Attribute::NonNull)); 1877 } 1878 1879 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); 1880 if (!AVAttr) 1881 if (const auto *TOTy = dyn_cast<TypedefType>(OTy)) 1882 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); 1883 if (AVAttr) { 1884 llvm::Value *AlignmentValue = 1885 EmitScalarExpr(AVAttr->getAlignment()); 1886 llvm::ConstantInt *AlignmentCI = 1887 cast<llvm::ConstantInt>(AlignmentValue); 1888 unsigned Alignment = 1889 std::min((unsigned) AlignmentCI->getZExtValue(), 1890 +llvm::Value::MaximumAlignment); 1891 1892 llvm::AttrBuilder Attrs; 1893 Attrs.addAlignmentAttr(Alignment); 1894 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1895 AI->getArgNo() + 1, Attrs)); 1896 } 1897 } 1898 1899 if (Arg->getType().isRestrictQualified()) 1900 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1901 AI->getArgNo() + 1, 1902 llvm::Attribute::NoAlias)); 1903 1904 // Ensure the argument is the correct type. 1905 if (V->getType() != ArgI.getCoerceToType()) 1906 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 1907 1908 if (isPromoted) 1909 V = emitArgumentDemotion(*this, Arg, V); 1910 1911 if (const CXXMethodDecl *MD = 1912 dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) { 1913 if (MD->isVirtual() && Arg == CXXABIThisDecl) 1914 V = CGM.getCXXABI(). 1915 adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V); 1916 } 1917 1918 // Because of merging of function types from multiple decls it is 1919 // possible for the type of an argument to not match the corresponding 1920 // type in the function type. Since we are codegening the callee 1921 // in here, add a cast to the argument type. 1922 llvm::Type *LTy = ConvertType(Arg->getType()); 1923 if (V->getType() != LTy) 1924 V = Builder.CreateBitCast(V, LTy); 1925 1926 ArgVals.push_back(ValueAndIsPtr(V, HaveValue)); 1927 break; 1928 } 1929 1930 llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName()); 1931 1932 // The alignment we need to use is the max of the requested alignment for 1933 // the argument plus the alignment required by our access code below. 1934 unsigned AlignmentToUse = 1935 CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType()); 1936 AlignmentToUse = std::max(AlignmentToUse, 1937 (unsigned)getContext().getDeclAlign(Arg).getQuantity()); 1938 1939 Alloca->setAlignment(AlignmentToUse); 1940 llvm::Value *V = Alloca; 1941 llvm::Value *Ptr = V; // Pointer to store into. 1942 1943 // If the value is offset in memory, apply the offset now. 1944 if (unsigned Offs = ArgI.getDirectOffset()) { 1945 Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy()); 1946 Ptr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), Ptr, Offs); 1947 Ptr = Builder.CreateBitCast(Ptr, 1948 llvm::PointerType::getUnqual(ArgI.getCoerceToType())); 1949 } 1950 1951 // Fast-isel and the optimizer generally like scalar values better than 1952 // FCAs, so we flatten them if this is safe to do for this argument. 
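// ("FCA" = first-class aggregate.)  For example (roughly), an argument whose
// coerce-to type is { i64, i64 } arrives as two scalar IR arguments named
// "<arg>.coerce0" and "<arg>.coerce1"; the code below stores them back into
// the parameter's alloca element by element.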
1953 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 1954 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && 1955 STy->getNumElements() > 1) { 1956 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 1957 llvm::Type *DstTy = 1958 cast<llvm::PointerType>(Ptr->getType())->getElementType(); 1959 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 1960 1961 if (SrcSize <= DstSize) { 1962 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy)); 1963 1964 assert(STy->getNumElements() == NumIRArgs); 1965 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1966 auto AI = FnArgs[FirstIRArg + i]; 1967 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 1968 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, Ptr, 0, i); 1969 Builder.CreateStore(AI, EltPtr); 1970 } 1971 } else { 1972 llvm::AllocaInst *TempAlloca = 1973 CreateTempAlloca(ArgI.getCoerceToType(), "coerce"); 1974 TempAlloca->setAlignment(AlignmentToUse); 1975 llvm::Value *TempV = TempAlloca; 1976 1977 assert(STy->getNumElements() == NumIRArgs); 1978 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1979 auto AI = FnArgs[FirstIRArg + i]; 1980 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 1981 llvm::Value *EltPtr = 1982 Builder.CreateConstGEP2_32(ArgI.getCoerceToType(), TempV, 0, i); 1983 Builder.CreateStore(AI, EltPtr); 1984 } 1985 1986 Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse); 1987 } 1988 } else { 1989 // Simple case, just do a coerced store of the argument into the alloca. 1990 assert(NumIRArgs == 1); 1991 auto AI = FnArgs[FirstIRArg]; 1992 AI->setName(Arg->getName() + ".coerce"); 1993 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this); 1994 } 1995 1996 1997 // Match to what EmitParmDecl is expecting for this type. 1998 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 1999 V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart()); 2000 if (isPromoted) 2001 V = emitArgumentDemotion(*this, Arg, V); 2002 ArgVals.push_back(ValueAndIsPtr(V, HaveValue)); 2003 } else { 2004 ArgVals.push_back(ValueAndIsPtr(V, HavePointer)); 2005 } 2006 break; 2007 } 2008 2009 case ABIArgInfo::Expand: { 2010 // If this structure was expanded into multiple arguments then 2011 // we need to create a temporary and reconstruct it from the 2012 // arguments. 2013 llvm::AllocaInst *Alloca = CreateMemTemp(Ty); 2014 CharUnits Align = getContext().getDeclAlign(Arg); 2015 Alloca->setAlignment(Align.getQuantity()); 2016 LValue LV = MakeAddrLValue(Alloca, Ty, Align); 2017 ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer)); 2018 2019 auto FnArgIter = FnArgs.begin() + FirstIRArg; 2020 ExpandTypeFromArgs(Ty, LV, FnArgIter); 2021 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs); 2022 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { 2023 auto AI = FnArgs[FirstIRArg + i]; 2024 AI->setName(Arg->getName() + "." + Twine(i)); 2025 } 2026 break; 2027 } 2028 2029 case ABIArgInfo::Ignore: 2030 assert(NumIRArgs == 0); 2031 // Initialize the local variable appropriately. 
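// An ignored argument (e.g. an empty struct on many targets) has no IR
// argument at all; give the parameter something harmless to bind to: a fresh
// temporary for aggregates, undef for scalars.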
2032 if (!hasScalarEvaluationKind(Ty)) { 2033 ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer)); 2034 } else { 2035 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType())); 2036 ArgVals.push_back(ValueAndIsPtr(U, HaveValue)); 2037 } 2038 break; 2039 } 2040 } 2041 2042 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 2043 for (int I = Args.size() - 1; I >= 0; --I) 2044 EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(), 2045 I + 1); 2046 } else { 2047 for (unsigned I = 0, E = Args.size(); I != E; ++I) 2048 EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(), 2049 I + 1); 2050 } 2051 } 2052 2053 static void eraseUnusedBitCasts(llvm::Instruction *insn) { 2054 while (insn->use_empty()) { 2055 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn); 2056 if (!bitcast) return; 2057 2058 // This is "safe" because we would have used a ConstantExpr otherwise. 2059 insn = cast<llvm::Instruction>(bitcast->getOperand(0)); 2060 bitcast->eraseFromParent(); 2061 } 2062 } 2063 2064 /// Try to emit a fused autorelease of a return result. 2065 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, 2066 llvm::Value *result) { 2067 // We must be immediately followed the cast. 2068 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock(); 2069 if (BB->empty()) return nullptr; 2070 if (&BB->back() != result) return nullptr; 2071 2072 llvm::Type *resultType = result->getType(); 2073 2074 // result is in a BasicBlock and is therefore an Instruction. 2075 llvm::Instruction *generator = cast<llvm::Instruction>(result); 2076 2077 SmallVector<llvm::Instruction*,4> insnsToKill; 2078 2079 // Look for: 2080 // %generator = bitcast %type1* %generator2 to %type2* 2081 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) { 2082 // We would have emitted this as a constant if the operand weren't 2083 // an Instruction. 2084 generator = cast<llvm::Instruction>(bitcast->getOperand(0)); 2085 2086 // Require the generator to be immediately followed by the cast. 2087 if (generator->getNextNode() != bitcast) 2088 return nullptr; 2089 2090 insnsToKill.push_back(bitcast); 2091 } 2092 2093 // Look for: 2094 // %generator = call i8* @objc_retain(i8* %originalResult) 2095 // or 2096 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult) 2097 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator); 2098 if (!call) return nullptr; 2099 2100 bool doRetainAutorelease; 2101 2102 if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) { 2103 doRetainAutorelease = true; 2104 } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints() 2105 .objc_retainAutoreleasedReturnValue) { 2106 doRetainAutorelease = false; 2107 2108 // If we emitted an assembly marker for this call (and the 2109 // ARCEntrypoints field should have been set if so), go looking 2110 // for that call. If we can't find it, we can't do this 2111 // optimization. But it should always be the immediately previous 2112 // instruction, unless we needed bitcasts around the call. 
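// Illustrative shape of what we expect to find when a marker was emitted
// (e.g. on ARM the marker is the inline-asm "mov r7, r7"):
//   call void asm sideeffect "mov\09r7, r7", ""()
//   %retained = call i8* @objc_retainAutoreleasedReturnValue(i8* %ret)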
2113 if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) { 2114 llvm::Instruction *prev = call->getPrevNode(); 2115 assert(prev); 2116 if (isa<llvm::BitCastInst>(prev)) { 2117 prev = prev->getPrevNode(); 2118 assert(prev); 2119 } 2120 assert(isa<llvm::CallInst>(prev)); 2121 assert(cast<llvm::CallInst>(prev)->getCalledValue() == 2122 CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker); 2123 insnsToKill.push_back(prev); 2124 } 2125 } else { 2126 return nullptr; 2127 } 2128 2129 result = call->getArgOperand(0); 2130 insnsToKill.push_back(call); 2131 2132 // Keep killing bitcasts, for sanity. Note that we no longer care 2133 // about precise ordering as long as there's exactly one use. 2134 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 2135 if (!bitcast->hasOneUse()) break; 2136 insnsToKill.push_back(bitcast); 2137 result = bitcast->getOperand(0); 2138 } 2139 2140 // Delete all the unnecessary instructions, from latest to earliest. 2141 for (SmallVectorImpl<llvm::Instruction*>::iterator 2142 i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i) 2143 (*i)->eraseFromParent(); 2144 2145 // Do the fused retain/autorelease if we were asked to. 2146 if (doRetainAutorelease) 2147 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 2148 2149 // Cast back to the result type. 2150 return CGF.Builder.CreateBitCast(result, resultType); 2151 } 2152 2153 /// If this is a +1 of the value of an immutable 'self', remove it. 2154 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 2155 llvm::Value *result) { 2156 // This is only applicable to a method with an immutable 'self'. 2157 const ObjCMethodDecl *method = 2158 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 2159 if (!method) return nullptr; 2160 const VarDecl *self = method->getSelfDecl(); 2161 if (!self->getType().isConstQualified()) return nullptr; 2162 2163 // Look for a retain call. 2164 llvm::CallInst *retainCall = 2165 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 2166 if (!retainCall || 2167 retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain) 2168 return nullptr; 2169 2170 // Look for an ordinary load of 'self'. 2171 llvm::Value *retainedValue = retainCall->getArgOperand(0); 2172 llvm::LoadInst *load = 2173 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 2174 if (!load || load->isAtomic() || load->isVolatile() || 2175 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self)) 2176 return nullptr; 2177 2178 // Okay! Burn it all down. This relies for correctness on the 2179 // assumption that the retain is emitted as part of the return and 2180 // that thereafter everything is used "linearly". 2181 llvm::Type *resultType = result->getType(); 2182 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 2183 assert(retainCall->use_empty()); 2184 retainCall->eraseFromParent(); 2185 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 2186 2187 return CGF.Builder.CreateBitCast(load, resultType); 2188 } 2189 2190 /// Emit an ARC autorelease of the result of a function. 2191 /// 2192 /// \return the value to actually return from the function 2193 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 2194 llvm::Value *result) { 2195 // If we're returning 'self', kill the initial retain. This is a 2196 // heuristic attempt to "encourage correctness" in the really unfortunate 2197 // case where we have a return of self during a dealloc and we desperately 2198 // need to avoid the possible autorelease. 
2199 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result)) 2200 return self; 2201 2202 // At -O0, try to emit a fused retain/autorelease. 2203 if (CGF.shouldUseFusedARCCalls()) 2204 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result)) 2205 return fused; 2206 2207 return CGF.EmitARCAutoreleaseReturnValue(result); 2208 } 2209 2210 /// Heuristically search for a dominating store to the return-value slot. 2211 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { 2212 // If there are multiple uses of the return-value slot, just check 2213 // for something immediately preceding the IP. Sometimes this can 2214 // happen with how we generate implicit-returns; it can also happen 2215 // with noreturn cleanups. 2216 if (!CGF.ReturnValue->hasOneUse()) { 2217 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); 2218 if (IP->empty()) return nullptr; 2219 llvm::Instruction *I = &IP->back(); 2220 2221 // Skip lifetime markers 2222 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(), 2223 IE = IP->rend(); 2224 II != IE; ++II) { 2225 if (llvm::IntrinsicInst *Intrinsic = 2226 dyn_cast<llvm::IntrinsicInst>(&*II)) { 2227 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) { 2228 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1); 2229 ++II; 2230 if (isa<llvm::BitCastInst>(&*II)) { 2231 if (CastAddr == &*II) { 2232 continue; 2233 } 2234 } 2235 } 2236 } 2237 I = &*II; 2238 break; 2239 } 2240 2241 llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(I); 2242 if (!store) return nullptr; 2243 if (store->getPointerOperand() != CGF.ReturnValue) return nullptr; 2244 assert(!store->isAtomic() && !store->isVolatile()); // see below 2245 return store; 2246 } 2247 2248 llvm::StoreInst *store = 2249 dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back()); 2250 if (!store) return nullptr; 2251 2252 // These aren't actually possible for non-coerced returns, and we 2253 // only care about non-coerced returns on this code path. 2254 assert(!store->isAtomic() && !store->isVolatile()); 2255 2256 // Now do a first-and-dirty dominance check: just walk up the 2257 // single-predecessors chain from the current insertion point. 2258 llvm::BasicBlock *StoreBB = store->getParent(); 2259 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); 2260 while (IP != StoreBB) { 2261 if (!(IP = IP->getSinglePredecessor())) 2262 return nullptr; 2263 } 2264 2265 // Okay, the store's basic block dominates the insertion point; we 2266 // can do our thing. 2267 return store; 2268 } 2269 2270 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI, 2271 bool EmitRetDbgLoc, 2272 SourceLocation EndLoc) { 2273 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) { 2274 // Naked functions don't have epilogues. 2275 Builder.CreateUnreachable(); 2276 return; 2277 } 2278 2279 // Functions with no result always return void. 2280 if (!ReturnValue) { 2281 Builder.CreateRetVoid(); 2282 return; 2283 } 2284 2285 llvm::DebugLoc RetDbgLoc; 2286 llvm::Value *RV = nullptr; 2287 QualType RetTy = FI.getReturnType(); 2288 const ABIArgInfo &RetAI = FI.getReturnInfo(); 2289 2290 switch (RetAI.getKind()) { 2291 case ABIArgInfo::InAlloca: 2292 // Aggregrates get evaluated directly into the destination. Sometimes we 2293 // need to return the sret value in a register, though. 
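// (inalloca is used for the MSVC C++ ABI on 32-bit x86; there the sret slot
// lives inside the argument packet, and the callee may still have to return
// the sret pointer in a register, which is why it is reloaded here.)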
2294 assert(hasAggregateEvaluationKind(RetTy)); 2295 if (RetAI.getInAllocaSRet()) { 2296 llvm::Function::arg_iterator EI = CurFn->arg_end(); 2297 --EI; 2298 llvm::Value *ArgStruct = EI; 2299 llvm::Value *SRet = Builder.CreateStructGEP( 2300 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex()); 2301 RV = Builder.CreateLoad(SRet, "sret"); 2302 } 2303 break; 2304 2305 case ABIArgInfo::Indirect: { 2306 auto AI = CurFn->arg_begin(); 2307 if (RetAI.isSRetAfterThis()) 2308 ++AI; 2309 switch (getEvaluationKind(RetTy)) { 2310 case TEK_Complex: { 2311 ComplexPairTy RT = 2312 EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy), 2313 EndLoc); 2314 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(AI, RetTy), 2315 /*isInit*/ true); 2316 break; 2317 } 2318 case TEK_Aggregate: 2319 // Do nothing; aggregrates get evaluated directly into the destination. 2320 break; 2321 case TEK_Scalar: 2322 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), 2323 MakeNaturalAlignAddrLValue(AI, RetTy), 2324 /*isInit*/ true); 2325 break; 2326 } 2327 break; 2328 } 2329 2330 case ABIArgInfo::Extend: 2331 case ABIArgInfo::Direct: 2332 if (RetAI.getCoerceToType() == ConvertType(RetTy) && 2333 RetAI.getDirectOffset() == 0) { 2334 // The internal return value temp always will have pointer-to-return-type 2335 // type, just do a load. 2336 2337 // If there is a dominating store to ReturnValue, we can elide 2338 // the load, zap the store, and usually zap the alloca. 2339 if (llvm::StoreInst *SI = 2340 findDominatingStoreToReturnValue(*this)) { 2341 // Reuse the debug location from the store unless there is 2342 // cleanup code to be emitted between the store and return 2343 // instruction. 2344 if (EmitRetDbgLoc && !AutoreleaseResult) 2345 RetDbgLoc = SI->getDebugLoc(); 2346 // Get the stored value and nuke the now-dead store. 2347 RV = SI->getValueOperand(); 2348 SI->eraseFromParent(); 2349 2350 // If that was the only use of the return value, nuke it as well now. 2351 if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) { 2352 cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent(); 2353 ReturnValue = nullptr; 2354 } 2355 2356 // Otherwise, we have to do a simple load. 2357 } else { 2358 RV = Builder.CreateLoad(ReturnValue); 2359 } 2360 } else { 2361 llvm::Value *V = ReturnValue; 2362 // If the value is offset in memory, apply the offset now. 2363 if (unsigned Offs = RetAI.getDirectOffset()) { 2364 V = Builder.CreateBitCast(V, Builder.getInt8PtrTy()); 2365 V = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), V, Offs); 2366 V = Builder.CreateBitCast(V, 2367 llvm::PointerType::getUnqual(RetAI.getCoerceToType())); 2368 } 2369 2370 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this); 2371 } 2372 2373 // In ARC, end functions that return a retainable type with a call 2374 // to objc_autoreleaseReturnValue. 
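// A caller compiled with ARC can pair this autorelease with a call to
// objc_retainAutoreleasedReturnValue, letting the runtime hand the +1 result
// across the call without it ever hitting the autorelease pool.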
2375 if (AutoreleaseResult) { 2376 assert(getLangOpts().ObjCAutoRefCount && 2377 !FI.isReturnsRetained() && 2378 RetTy->isObjCRetainableType()); 2379 RV = emitAutoreleaseOfResult(*this, RV); 2380 } 2381 2382 break; 2383 2384 case ABIArgInfo::Ignore: 2385 break; 2386 2387 case ABIArgInfo::Expand: 2388 llvm_unreachable("Invalid ABI kind for return argument"); 2389 } 2390 2391 llvm::Instruction *Ret; 2392 if (RV) { 2393 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) { 2394 if (auto RetNNAttr = CurGD.getDecl()->getAttr<ReturnsNonNullAttr>()) { 2395 SanitizerScope SanScope(this); 2396 llvm::Value *Cond = Builder.CreateICmpNE( 2397 RV, llvm::Constant::getNullValue(RV->getType())); 2398 llvm::Constant *StaticData[] = { 2399 EmitCheckSourceLocation(EndLoc), 2400 EmitCheckSourceLocation(RetNNAttr->getLocation()), 2401 }; 2402 EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute), 2403 "nonnull_return", StaticData, None); 2404 } 2405 } 2406 Ret = Builder.CreateRet(RV); 2407 } else { 2408 Ret = Builder.CreateRetVoid(); 2409 } 2410 2411 if (RetDbgLoc) 2412 Ret->setDebugLoc(std::move(RetDbgLoc)); 2413 } 2414 2415 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 2416 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 2417 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 2418 } 2419 2420 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) { 2421 // FIXME: Generate IR in one pass, rather than going back and fixing up these 2422 // placeholders. 2423 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 2424 llvm::Value *Placeholder = 2425 llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo()); 2426 Placeholder = CGF.Builder.CreateLoad(Placeholder); 2427 return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(), 2428 Ty.getQualifiers(), 2429 AggValueSlot::IsNotDestructed, 2430 AggValueSlot::DoesNotNeedGCBarriers, 2431 AggValueSlot::IsNotAliased); 2432 } 2433 2434 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 2435 const VarDecl *param, 2436 SourceLocation loc) { 2437 // StartFunction converted the ABI-lowered parameter(s) into a 2438 // local alloca. We need to turn that into an r-value suitable 2439 // for EmitCall. 2440 llvm::Value *local = GetAddrOfLocalVar(param); 2441 2442 QualType type = param->getType(); 2443 2444 // For the most part, we just need to load the alloca, except: 2445 // 1) aggregate r-values are actually pointers to temporaries, and 2446 // 2) references to non-scalars are pointers directly to the aggregate. 2447 // I don't know why references to scalars are different here. 2448 if (const ReferenceType *ref = type->getAs<ReferenceType>()) { 2449 if (!hasScalarEvaluationKind(ref->getPointeeType())) 2450 return args.add(RValue::getAggregate(local), type); 2451 2452 // Locals which are references to scalars are represented 2453 // with allocas holding the pointer. 2454 return args.add(RValue::get(Builder.CreateLoad(local)), type); 2455 } 2456 2457 assert(!isInAllocaArgument(CGM.getCXXABI(), type) && 2458 "cannot emit delegate call arguments for inalloca arguments!"); 2459 2460 args.add(convertTempToRValue(local, type, loc), type); 2461 } 2462 2463 static bool isProvablyNull(llvm::Value *addr) { 2464 return isa<llvm::ConstantPointerNull>(addr); 2465 } 2466 2467 static bool isProvablyNonNull(llvm::Value *addr) { 2468 return isa<llvm::AllocaInst>(addr); 2469 } 2470 2471 /// Emit the actual writing-back of a writeback. 
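/// Writebacks come from passing the address of an object with indirect
/// copy-restore semantics (an ObjCIndirectCopyRestoreExpr), e.g. passing
/// '&error' to an 'NSError **' out-parameter under ARC: the value was copied
/// into a temporary before the call, and this copies it back afterwards.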
2472 static void emitWriteback(CodeGenFunction &CGF, 2473 const CallArgList::Writeback &writeback) { 2474 const LValue &srcLV = writeback.Source; 2475 llvm::Value *srcAddr = srcLV.getAddress(); 2476 assert(!isProvablyNull(srcAddr) && 2477 "shouldn't have writeback for provably null argument"); 2478 2479 llvm::BasicBlock *contBB = nullptr; 2480 2481 // If the argument wasn't provably non-null, we need to null check 2482 // before doing the store. 2483 bool provablyNonNull = isProvablyNonNull(srcAddr); 2484 if (!provablyNonNull) { 2485 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 2486 contBB = CGF.createBasicBlock("icr.done"); 2487 2488 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull"); 2489 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 2490 CGF.EmitBlock(writebackBB); 2491 } 2492 2493 // Load the value to writeback. 2494 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 2495 2496 // Cast it back, in case we're writing an id to a Foo* or something. 2497 value = CGF.Builder.CreateBitCast(value, 2498 cast<llvm::PointerType>(srcAddr->getType())->getElementType(), 2499 "icr.writeback-cast"); 2500 2501 // Perform the writeback. 2502 2503 // If we have a "to use" value, it's something we need to emit a use 2504 // of. This has to be carefully threaded in: if it's done after the 2505 // release it's potentially undefined behavior (and the optimizer 2506 // will ignore it), and if it happens before the retain then the 2507 // optimizer could move the release there. 2508 if (writeback.ToUse) { 2509 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 2510 2511 // Retain the new value. No need to block-copy here: the block's 2512 // being passed up the stack. 2513 value = CGF.EmitARCRetainNonBlock(value); 2514 2515 // Emit the intrinsic use here. 2516 CGF.EmitARCIntrinsicUse(writeback.ToUse); 2517 2518 // Load the old value (primitively). 2519 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 2520 2521 // Put the new value in place (primitively). 2522 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 2523 2524 // Release the old value. 2525 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 2526 2527 // Otherwise, we can just do a normal lvalue store. 2528 } else { 2529 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 2530 } 2531 2532 // Jump to the continuation block. 2533 if (!provablyNonNull) 2534 CGF.EmitBlock(contBB); 2535 } 2536 2537 static void emitWritebacks(CodeGenFunction &CGF, 2538 const CallArgList &args) { 2539 for (const auto &I : args.writebacks()) 2540 emitWriteback(CGF, I); 2541 } 2542 2543 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, 2544 const CallArgList &CallArgs) { 2545 assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()); 2546 ArrayRef<CallArgList::CallArgCleanup> Cleanups = 2547 CallArgs.getCleanupsToDeactivate(); 2548 // Iterate in reverse to increase the likelihood of popping the cleanup. 
2549 for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator 2550 I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) { 2551 CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP); 2552 I->IsActiveIP->eraseFromParent(); 2553 } 2554 } 2555 2556 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { 2557 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) 2558 if (uop->getOpcode() == UO_AddrOf) 2559 return uop->getSubExpr(); 2560 return nullptr; 2561 } 2562 2563 /// Emit an argument that's being passed call-by-writeback. That is, 2564 /// we are passing the address of 2565 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, 2566 const ObjCIndirectCopyRestoreExpr *CRE) { 2567 LValue srcLV; 2568 2569 // Make an optimistic effort to emit the address as an l-value. 2570 // This can fail if the the argument expression is more complicated. 2571 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { 2572 srcLV = CGF.EmitLValue(lvExpr); 2573 2574 // Otherwise, just emit it as a scalar. 2575 } else { 2576 llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr()); 2577 2578 QualType srcAddrType = 2579 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); 2580 srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType); 2581 } 2582 llvm::Value *srcAddr = srcLV.getAddress(); 2583 2584 // The dest and src types don't necessarily match in LLVM terms 2585 // because of the crazy ObjC compatibility rules. 2586 2587 llvm::PointerType *destType = 2588 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); 2589 2590 // If the address is a constant null, just pass the appropriate null. 2591 if (isProvablyNull(srcAddr)) { 2592 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), 2593 CRE->getType()); 2594 return; 2595 } 2596 2597 // Create the temporary. 2598 llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(), 2599 "icr.temp"); 2600 // Loading an l-value can introduce a cleanup if the l-value is __weak, 2601 // and that cleanup will be conditional if we can't prove that the l-value 2602 // isn't null, so we need to register a dominating point so that the cleanups 2603 // system will make valid IR. 2604 CodeGenFunction::ConditionalEvaluation condEval(CGF); 2605 2606 // Zero-initialize it if we're not doing a copy-initialization. 2607 bool shouldCopy = CRE->shouldCopy(); 2608 if (!shouldCopy) { 2609 llvm::Value *null = 2610 llvm::ConstantPointerNull::get( 2611 cast<llvm::PointerType>(destType->getElementType())); 2612 CGF.Builder.CreateStore(null, temp); 2613 } 2614 2615 llvm::BasicBlock *contBB = nullptr; 2616 llvm::BasicBlock *originBB = nullptr; 2617 2618 // If the address is *not* known to be non-null, we need to switch. 2619 llvm::Value *finalArgument; 2620 2621 bool provablyNonNull = isProvablyNonNull(srcAddr); 2622 if (provablyNonNull) { 2623 finalArgument = temp; 2624 } else { 2625 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull"); 2626 2627 finalArgument = CGF.Builder.CreateSelect(isNull, 2628 llvm::ConstantPointerNull::get(destType), 2629 temp, "icr.argument"); 2630 2631 // If we need to copy, then the load has to be conditional, which 2632 // means we need control flow. 
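// Resulting control flow when a copy is needed and srcAddr might be null
// (roughly):
//   originBB:  %isnull = icmp ...; br %isnull, label %icr.cont, label %icr.copy
//   icr.copy:  load the source and store it into the temporary; br label %icr.cont
//   icr.cont:  phi ("icr.to-use") if we must keep the copied value alive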
2633 if (shouldCopy) { 2634 originBB = CGF.Builder.GetInsertBlock(); 2635 contBB = CGF.createBasicBlock("icr.cont"); 2636 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 2637 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 2638 CGF.EmitBlock(copyBB); 2639 condEval.begin(CGF); 2640 } 2641 } 2642 2643 llvm::Value *valueToUse = nullptr; 2644 2645 // Perform a copy if necessary. 2646 if (shouldCopy) { 2647 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 2648 assert(srcRV.isScalar()); 2649 2650 llvm::Value *src = srcRV.getScalarVal(); 2651 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 2652 "icr.cast"); 2653 2654 // Use an ordinary store, not a store-to-lvalue. 2655 CGF.Builder.CreateStore(src, temp); 2656 2657 // If optimization is enabled, and the value was held in a 2658 // __strong variable, we need to tell the optimizer that this 2659 // value has to stay alive until we're doing the store back. 2660 // This is because the temporary is effectively unretained, 2661 // and so otherwise we can violate the high-level semantics. 2662 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 2663 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 2664 valueToUse = src; 2665 } 2666 } 2667 2668 // Finish the control flow if we needed it. 2669 if (shouldCopy && !provablyNonNull) { 2670 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 2671 CGF.EmitBlock(contBB); 2672 2673 // Make a phi for the value to intrinsically use. 2674 if (valueToUse) { 2675 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 2676 "icr.to-use"); 2677 phiToUse->addIncoming(valueToUse, copyBB); 2678 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 2679 originBB); 2680 valueToUse = phiToUse; 2681 } 2682 2683 condEval.end(CGF); 2684 } 2685 2686 args.addWriteback(srcLV, temp, valueToUse); 2687 args.add(RValue::get(finalArgument), CRE->getType()); 2688 } 2689 2690 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 2691 assert(!StackBase && !StackCleanup.isValid()); 2692 2693 // Save the stack. 2694 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 2695 StackBase = CGF.Builder.CreateCall(F, "inalloca.save"); 2696 2697 // Control gets really tied up in landing pads, so we have to spill the 2698 // stacksave to an alloca to avoid violating SSA form. 2699 // TODO: This is dead if we never emit the cleanup. We should create the 2700 // alloca and store lazily on the first cleanup emission. 2701 StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem"); 2702 CGF.Builder.CreateStore(StackBase, StackBaseMem); 2703 CGF.pushStackRestore(EHCleanup, StackBaseMem); 2704 StackCleanup = CGF.EHStack.getInnermostEHScope(); 2705 assert(StackCleanup.isValid()); 2706 } 2707 2708 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 2709 if (StackBase) { 2710 CGF.DeactivateCleanupBlock(StackCleanup, StackBase); 2711 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 2712 // We could load StackBase from StackBaseMem, but in the non-exceptional 2713 // case we can skip it. 2714 CGF.Builder.CreateCall(F, StackBase); 2715 } 2716 } 2717 2718 static void emitNonNullArgCheck(CodeGenFunction &CGF, RValue RV, 2719 QualType ArgType, SourceLocation ArgLoc, 2720 const FunctionDecl *FD, unsigned ParmNum) { 2721 if (!CGF.SanOpts.has(SanitizerKind::NonnullAttribute) || !FD) 2722 return; 2723 auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr; 2724 unsigned ArgNo = PVD ? 
PVD->getFunctionScopeIndex() : ParmNum; 2725 auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo); 2726 if (!NNAttr) 2727 return; 2728 CodeGenFunction::SanitizerScope SanScope(&CGF); 2729 assert(RV.isScalar()); 2730 llvm::Value *V = RV.getScalarVal(); 2731 llvm::Value *Cond = 2732 CGF.Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType())); 2733 llvm::Constant *StaticData[] = { 2734 CGF.EmitCheckSourceLocation(ArgLoc), 2735 CGF.EmitCheckSourceLocation(NNAttr->getLocation()), 2736 llvm::ConstantInt::get(CGF.Int32Ty, ArgNo + 1), 2737 }; 2738 CGF.EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute), 2739 "nonnull_arg", StaticData, None); 2740 } 2741 2742 void CodeGenFunction::EmitCallArgs(CallArgList &Args, 2743 ArrayRef<QualType> ArgTypes, 2744 CallExpr::const_arg_iterator ArgBeg, 2745 CallExpr::const_arg_iterator ArgEnd, 2746 const FunctionDecl *CalleeDecl, 2747 unsigned ParamsToSkip) { 2748 // We *have* to evaluate arguments from right to left in the MS C++ ABI, 2749 // because arguments are destroyed left to right in the callee. 2750 if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 2751 // Insert a stack save if we're going to need any inalloca args. 2752 bool HasInAllocaArgs = false; 2753 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end(); 2754 I != E && !HasInAllocaArgs; ++I) 2755 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I); 2756 if (HasInAllocaArgs) { 2757 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 2758 Args.allocateArgumentMemory(*this); 2759 } 2760 2761 // Evaluate each argument. 2762 size_t CallArgsStart = Args.size(); 2763 for (int I = ArgTypes.size() - 1; I >= 0; --I) { 2764 CallExpr::const_arg_iterator Arg = ArgBeg + I; 2765 EmitCallArg(Args, *Arg, ArgTypes[I]); 2766 emitNonNullArgCheck(*this, Args.back().RV, ArgTypes[I], Arg->getExprLoc(), 2767 CalleeDecl, ParamsToSkip + I); 2768 } 2769 2770 // Un-reverse the arguments we just evaluated so they match up with the LLVM 2771 // IR function. 
2772 std::reverse(Args.begin() + CallArgsStart, Args.end()); 2773 return; 2774 } 2775 2776 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { 2777 CallExpr::const_arg_iterator Arg = ArgBeg + I; 2778 assert(Arg != ArgEnd); 2779 EmitCallArg(Args, *Arg, ArgTypes[I]); 2780 emitNonNullArgCheck(*this, Args.back().RV, ArgTypes[I], Arg->getExprLoc(), 2781 CalleeDecl, ParamsToSkip + I); 2782 } 2783 } 2784 2785 namespace { 2786 2787 struct DestroyUnpassedArg : EHScopeStack::Cleanup { 2788 DestroyUnpassedArg(llvm::Value *Addr, QualType Ty) 2789 : Addr(Addr), Ty(Ty) {} 2790 2791 llvm::Value *Addr; 2792 QualType Ty; 2793 2794 void Emit(CodeGenFunction &CGF, Flags flags) override { 2795 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 2796 assert(!Dtor->isTrivial()); 2797 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 2798 /*Delegating=*/false, Addr); 2799 } 2800 }; 2801 2802 } 2803 2804 struct DisableDebugLocationUpdates { 2805 CodeGenFunction &CGF; 2806 bool disabledDebugInfo; 2807 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { 2808 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) 2809 CGF.disableDebugInfo(); 2810 } 2811 ~DisableDebugLocationUpdates() { 2812 if (disabledDebugInfo) 2813 CGF.enableDebugInfo(); 2814 } 2815 }; 2816 2817 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 2818 QualType type) { 2819 DisableDebugLocationUpdates Dis(*this, E); 2820 if (const ObjCIndirectCopyRestoreExpr *CRE 2821 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 2822 assert(getLangOpts().ObjCAutoRefCount); 2823 assert(getContext().hasSameType(E->getType(), type)); 2824 return emitWritebackArg(*this, args, CRE); 2825 } 2826 2827 assert(type->isReferenceType() == E->isGLValue() && 2828 "reference binding to unmaterialized r-value!"); 2829 2830 if (E->isGLValue()) { 2831 assert(E->getObjectKind() == OK_Ordinary); 2832 return args.add(EmitReferenceBindingToExpr(E), type); 2833 } 2834 2835 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 2836 2837 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 2838 // However, we still have to push an EH-only cleanup in case we unwind before 2839 // we make it to the call. 2840 if (HasAggregateEvalKind && 2841 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 2842 // If we're using inalloca, use the argument memory. Otherwise, use a 2843 // temporary. 2844 AggValueSlot Slot; 2845 if (args.isUsingInAlloca()) 2846 Slot = createPlaceholderSlot(*this, type); 2847 else 2848 Slot = CreateAggTemp(type, "agg.tmp"); 2849 2850 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 2851 bool DestroyedInCallee = 2852 RD && RD->hasNonTrivialDestructor() && 2853 CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default; 2854 if (DestroyedInCallee) 2855 Slot.setExternallyDestructed(); 2856 2857 EmitAggExpr(E, Slot); 2858 RValue RV = Slot.asRValue(); 2859 args.add(RV, type); 2860 2861 if (DestroyedInCallee) { 2862 // Create a no-op GEP between the placeholder and the cleanup so we can 2863 // RAUW it successfully. It also serves as a marker of the first 2864 // instruction where the cleanup is active. 2865 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type); 2866 // This unreachable is a temporary marker which will be removed later. 
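// (deactivateArgCleanupsBeforeCall() erases this marker again once the
// cleanup has been deactivated just before the call is emitted.)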
2867 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 2868 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 2869 } 2870 return; 2871 } 2872 2873 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 2874 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 2875 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 2876 assert(L.isSimple()); 2877 if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) { 2878 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true); 2879 } else { 2880 // We can't represent a misaligned lvalue in the CallArgList, so copy 2881 // to an aligned temporary now. 2882 llvm::Value *tmp = CreateMemTemp(type); 2883 EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(), 2884 L.getAlignment()); 2885 args.add(RValue::getAggregate(tmp), type); 2886 } 2887 return; 2888 } 2889 2890 args.add(EmitAnyExprToTemp(E), type); 2891 } 2892 2893 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 2894 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 2895 // implicitly widens null pointer constants that are arguments to varargs 2896 // functions to pointer-sized ints. 2897 if (!getTarget().getTriple().isOSWindows()) 2898 return Arg->getType(); 2899 2900 if (Arg->getType()->isIntegerType() && 2901 getContext().getTypeSize(Arg->getType()) < 2902 getContext().getTargetInfo().getPointerWidth(0) && 2903 Arg->isNullPointerConstant(getContext(), 2904 Expr::NPC_ValueDependentIsNotNull)) { 2905 return getContext().getIntPtrType(); 2906 } 2907 2908 return Arg->getType(); 2909 } 2910 2911 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 2912 // optimizer it can aggressively ignore unwind edges. 2913 void 2914 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 2915 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 2916 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 2917 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 2918 CGM.getNoObjCARCExceptionsMetadata()); 2919 } 2920 2921 /// Emits a call to the given no-arguments nounwind runtime function. 2922 llvm::CallInst * 2923 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 2924 const llvm::Twine &name) { 2925 return EmitNounwindRuntimeCall(callee, None, name); 2926 } 2927 2928 /// Emits a call to the given nounwind runtime function. 2929 llvm::CallInst * 2930 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 2931 ArrayRef<llvm::Value*> args, 2932 const llvm::Twine &name) { 2933 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 2934 call->setDoesNotThrow(); 2935 return call; 2936 } 2937 2938 /// Emits a simple call (never an invoke) to the given no-arguments 2939 /// runtime function. 2940 llvm::CallInst * 2941 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 2942 const llvm::Twine &name) { 2943 return EmitRuntimeCall(callee, None, name); 2944 } 2945 2946 /// Emits a simple call (never an invoke) to the given runtime 2947 /// function. 2948 llvm::CallInst * 2949 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 2950 ArrayRef<llvm::Value*> args, 2951 const llvm::Twine &name) { 2952 llvm::CallInst *call = Builder.CreateCall(callee, args, name); 2953 call->setCallingConv(getRuntimeCC()); 2954 return call; 2955 } 2956 2957 /// Emits a call or invoke to the given noreturn runtime function. 
2958 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, 2959 ArrayRef<llvm::Value*> args) { 2960 if (getInvokeDest()) { 2961 llvm::InvokeInst *invoke = 2962 Builder.CreateInvoke(callee, 2963 getUnreachableBlock(), 2964 getInvokeDest(), 2965 args); 2966 invoke->setDoesNotReturn(); 2967 invoke->setCallingConv(getRuntimeCC()); 2968 } else { 2969 llvm::CallInst *call = Builder.CreateCall(callee, args); 2970 call->setDoesNotReturn(); 2971 call->setCallingConv(getRuntimeCC()); 2972 Builder.CreateUnreachable(); 2973 } 2974 } 2975 2976 /// Emits a call or invoke instruction to the given nullary runtime 2977 /// function. 2978 llvm::CallSite 2979 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 2980 const Twine &name) { 2981 return EmitRuntimeCallOrInvoke(callee, None, name); 2982 } 2983 2984 /// Emits a call or invoke instruction to the given runtime function. 2985 llvm::CallSite 2986 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 2987 ArrayRef<llvm::Value*> args, 2988 const Twine &name) { 2989 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name); 2990 callSite.setCallingConv(getRuntimeCC()); 2991 return callSite; 2992 } 2993 2994 llvm::CallSite 2995 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 2996 const Twine &Name) { 2997 return EmitCallOrInvoke(Callee, None, Name); 2998 } 2999 3000 /// Emits a call or invoke instruction to the given function, depending 3001 /// on the current state of the EH stack. 3002 llvm::CallSite 3003 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 3004 ArrayRef<llvm::Value *> Args, 3005 const Twine &Name) { 3006 llvm::BasicBlock *InvokeDest = getInvokeDest(); 3007 3008 llvm::Instruction *Inst; 3009 if (!InvokeDest) 3010 Inst = Builder.CreateCall(Callee, Args, Name); 3011 else { 3012 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 3013 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name); 3014 EmitBlock(ContBB); 3015 } 3016 3017 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3018 // optimizer it can aggressively ignore unwind edges. 3019 if (CGM.getLangOpts().ObjCAutoRefCount) 3020 AddObjCARCExceptionMetadata(Inst); 3021 3022 return llvm::CallSite(Inst); 3023 } 3024 3025 /// \brief Store a non-aggregate value to an address to initialize it. For 3026 /// initialization, a non-atomic store will be used. 3027 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, 3028 LValue Dst) { 3029 if (Src.isScalar()) 3030 CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true); 3031 else 3032 CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true); 3033 } 3034 3035 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, 3036 llvm::Value *New) { 3037 DeferredReplacements.push_back(std::make_pair(Old, New)); 3038 } 3039 3040 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 3041 llvm::Value *Callee, 3042 ReturnValueSlot ReturnValue, 3043 const CallArgList &CallArgs, 3044 const Decl *TargetDecl, 3045 llvm::Instruction **callOrInvoke) { 3046 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 3047 3048 // Handle struct-return functions by passing a pointer to the 3049 // location that we would like to return into. 
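// For example (roughly), a call 'Big r = f();' where 'struct Big' is returned
// indirectly is lowered to
//   %tmp = alloca %struct.Big
//   call void @f(%struct.Big* sret %tmp)
// with %tmp being either the caller-provided ReturnValue slot or a temporary
// created below.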
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  llvm::FunctionType *IRFuncTy =
      cast<llvm::FunctionType>(
          cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
  llvm::AllocaInst *ArgMemory = nullptr;
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = AI;
  }

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  llvm::Value *SRetPtr = nullptr;
  if (RetAI.isIndirect() || RetAI.isInAlloca()) {
    SRetPtr = ReturnValue.getValue();
    if (!SRetPtr)
      SRetPtr = CreateMemTemp(RetTy);
    if (IRFunctionArgs.hasSRetArg()) {
      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr;
    } else {
      llvm::Value *Addr =
          Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
                                  RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr, Addr);
    }
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (RV.isAggregate()) {
        // Replace the placeholder with the appropriate argument slot GEP.
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(RV.getAggregateAddr());
        CGBuilderTy::InsertPoint IP = Builder.saveIP();
        Builder.SetInsertPoint(Placeholder);
        llvm::Value *Addr =
            Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
                                    ArgInfo.getInAllocaFieldIndex());
        Builder.restoreIP(IP);
        deferPlaceholderReplacement(Placeholder, Addr);
      } else {
        // Store the RValue into the argument struct.
        llvm::Value *Addr =
            Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
                                    ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr->getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable. The
        // definition of a type later in a translation unit may change its type
        // from {}* to (%struct.foo*)*.
        if (Addr->getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      }
      break;
    }

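    // Illustrative example (sketch) of the inalloca path above: under the
    // MSVC C++ ABI on 32-bit x86, an argument with a non-trivial copy
    // constructor or destructor, e.g.
    //
    //   struct S { S(); S(const S &); ~S(); int x; };
    //   void take(S s);
    //   ...
    //   take(S());
    //
    // must be constructed directly in the callee's argument memory. The
    // "argmem" alloca created earlier is tagged inalloca, and the code above
    // fills the corresponding field in place instead of passing a separate
    // IR argument.
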
    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        IRCallArgs[FirstIRArg] = AI;

        LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source. (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is located in an address space
        //    different than that of the argument (0).
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
        const unsigned ArgAddrSpace =
            (FirstIRArg < IRFuncTy->getNumParams()
                 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
                 : 0);
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, *TD) < Align) ||
            (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          IRCallArgs[FirstIRArg] = AI;
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
        } else {
          // Skip the extra memcpy call.
          IRCallArgs[FirstIRArg] = Addr;
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

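        // Illustrative example (sketch): for a plain 'int' argument the
        // coerced type and ConvertType(int) are both i32 on typical targets,
        // so the scalar flows through this fast path unchanged:
        //
        //   void callee(int x);
        //   callee(42);          // IR (roughly): call void @callee(i32 42)
        //
        // The zext above only fires when the ABI wants the value as a wider
        // integer than the scalar we currently hold; we never truncate.
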
        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar() || RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
            llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy =
            cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          llvm::AllocaInst *TempAlloca =
              CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
          SrcPtr = TempAlloca;
        } else {
          SrcPtr = Builder.CreateBitCast(SrcPtr,
                                         llvm::PointerType::getUnqual(STy));
        }

        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        IRCallArgs[FirstIRArg] =
            CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), *this);
      }

      break;
    }

    case ABIArgInfo::Expand:
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
  }

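  // Illustrative example (sketch, lowering is target-specific) of the
  // flattening performed in the Direct case above: for
  //
  //   struct Pair { double x, y; };
  //   void use(struct Pair p);
  //
  // the x86-64 SysV ABI coerces 'p' to the FCA { double, double }, which is
  // flattened so the call becomes roughly
  //
  //   call void @use(double %p.x, double %p.y)
  //
  // instead of passing a single first-class aggregate value.
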
  if (ArgMemory) {
    llvm::Value *Arg = ArgMemory;
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site. In such
      // cases, we can't do any parameter mismatch checks. Give up and bitcast
      // the callee.
      unsigned CalleeAS =
          cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
      Callee = Builder.CreateBitCast(
          Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
                                                 DE = DeclaredTy->element_end(),
                                                 FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast. This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
          cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == IRCallArgs.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it. This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

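  // Sketch of what the check above looks for: a Callee of the form
  //
  //   bitcast (T_actual* @f to T_callsite*)
  //
  // where T_callsite agrees with @f's real type T_actual on the return type
  // and on every fixed parameter type, the call passes exactly that many
  // arguments, and T_callsite is at most "more variadic" than T_actual. The
  // cast then adds nothing, so we call @f directly; besides being tidier,
  // this lets an always_inline @f be inlined even at -O0.
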
  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // Inalloca argument can have different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
                             CallingConv, true);
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
                                                     AttributeList);

  llvm::BasicBlock *InvokeDest = nullptr;
  if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
                          llvm::Attribute::NoUnwind) ||
      currentFunctionUsesSEHTry())
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, IRCallArgs);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !CS.hasFnAttr(llvm::Attribute::NoInline))
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::AlwaysInline);

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope())
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::NoInline);

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately. Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

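  // Illustrative example (sketch) of the conversion below: if RetTy is a
  // small C struct such as 'struct S { int a, b; };' that the ABI coerces to
  // i64 (as the x86-64 SysV ABI does), the call yields an i64 that is spilled
  // to a temporary with CreateCoercedStore and reloaded as an aggregate
  // RValue. A return type whose coerced type already matches, such as 'int',
  // is simply wrapped with RValue::get.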
  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect:
      return convertTempToRValue(SRetPtr, RetTy, SourceLocation());

    case ABIArgInfo::Ignore:
      // If we are ignoring an argument that had a result, make sure to
      // construct the appropriate return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          llvm::Value *DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the argument doesn't match, perform a bitcast to coerce it.
          // This can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      llvm::Value *DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      llvm::Value *StorePtr = DestPtr;
      if (unsigned Offs = RetAI.getDirectOffset()) {
        StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
        StorePtr =
            Builder.CreateConstGEP1_32(Builder.getInt8Ty(), StorePtr, Offs);
        StorePtr = Builder.CreateBitCast(StorePtr,
            llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  }();

  if (Ret.isScalar() && TargetDecl) {
    if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
      llvm::Value *OffsetValue = nullptr;
      if (const auto *Offset = AA->getOffset())
        OffsetValue = EmitScalarExpr(Offset);

      llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
      llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
      EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
                              OffsetValue);
    }
  }

  return Ret;
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}
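
// Illustrative note (sketch): EmitVAArg is the hook behind the C va_arg
// expression. For
//
//   int f(int n, ...) {
//     va_list ap;
//     va_start(ap, n);
//     int x = va_arg(ap, int);   // lowered through EmitVAArg(ap, int)
//     ...
//   }
//
// the target's ABIInfo decides whether this is a simple pointer bump through
// the va_list or something richer, such as the register-save-area scheme used
// by the x86-64 SysV ABI.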