1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // These classes wrap the information about a call or function 11 // definition used to handle ABI compliancy. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "CGCall.h" 16 #include "ABIInfo.h" 17 #include "CGCXXABI.h" 18 #include "CodeGenFunction.h" 19 #include "CodeGenModule.h" 20 #include "TargetInfo.h" 21 #include "clang/AST/Decl.h" 22 #include "clang/AST/DeclCXX.h" 23 #include "clang/AST/DeclObjC.h" 24 #include "clang/Basic/TargetInfo.h" 25 #include "clang/CodeGen/CGFunctionInfo.h" 26 #include "clang/Frontend/CodeGenOptions.h" 27 #include "llvm/ADT/StringExtras.h" 28 #include "llvm/IR/Attributes.h" 29 #include "llvm/IR/CallSite.h" 30 #include "llvm/IR/DataLayout.h" 31 #include "llvm/IR/InlineAsm.h" 32 #include "llvm/IR/Intrinsics.h" 33 #include "llvm/Transforms/Utils/Local.h" 34 using namespace clang; 35 using namespace CodeGen; 36 37 /***/ 38 39 static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) { 40 switch (CC) { 41 default: return llvm::CallingConv::C; 42 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall; 43 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall; 44 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall; 45 case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64; 46 case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV; 47 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS; 48 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 49 case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI; 50 // TODO: add support for CC_X86Pascal to llvm 51 } 52 } 53 54 /// Derives the 'this' type for codegen purposes, i.e. ignoring method 55 /// qualification. 56 /// FIXME: address space qualification? 57 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) { 58 QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal(); 59 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy)); 60 } 61 62 /// Returns the canonical formal type of the given C++ method. 63 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) { 64 return MD->getType()->getCanonicalTypeUnqualified() 65 .getAs<FunctionProtoType>(); 66 } 67 68 /// Returns the "extra-canonicalized" return type, which discards 69 /// qualifiers on the return type. Codegen doesn't care about them, 70 /// and it makes ABI code a little easier to be able to assume that 71 /// all parameter and return types are top-level unqualified. 72 static CanQualType GetReturnType(QualType RetTy) { 73 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); 74 } 75 76 /// Arrange the argument and result information for a value of the given 77 /// unprototyped freestanding function type. 78 const CGFunctionInfo & 79 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) { 80 // When translating an unprototyped function type, always use a 81 // variadic type. 82 return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(), 83 false, None, FTNP->getExtInfo(), 84 RequiredArgs(0)); 85 } 86 87 /// Arrange the LLVM function layout for a value of the given function 88 /// type, on top of any implicit parameters already stored. 89 static const CGFunctionInfo & 90 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool IsInstanceMethod, 91 SmallVectorImpl<CanQualType> &prefix, 92 CanQual<FunctionProtoType> FTP) { 93 RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size()); 94 // FIXME: Kill copy. 95 for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i) 96 prefix.push_back(FTP->getParamType(i)); 97 CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); 98 return CGT.arrangeLLVMFunctionInfo(resultType, IsInstanceMethod, prefix, 99 FTP->getExtInfo(), required); 100 } 101 102 /// Arrange the argument and result information for a value of the 103 /// given freestanding function type. 104 const CGFunctionInfo & 105 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) { 106 SmallVector<CanQualType, 16> argTypes; 107 return ::arrangeLLVMFunctionInfo(*this, false, argTypes, FTP); 108 } 109 110 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) { 111 // Set the appropriate calling convention for the Function. 112 if (D->hasAttr<StdCallAttr>()) 113 return CC_X86StdCall; 114 115 if (D->hasAttr<FastCallAttr>()) 116 return CC_X86FastCall; 117 118 if (D->hasAttr<ThisCallAttr>()) 119 return CC_X86ThisCall; 120 121 if (D->hasAttr<PascalAttr>()) 122 return CC_X86Pascal; 123 124 if (PcsAttr *PCS = D->getAttr<PcsAttr>()) 125 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP); 126 127 if (D->hasAttr<PnaclCallAttr>()) 128 return CC_PnaclCall; 129 130 if (D->hasAttr<IntelOclBiccAttr>()) 131 return CC_IntelOclBicc; 132 133 if (D->hasAttr<MSABIAttr>()) 134 return IsWindows ? CC_C : CC_X86_64Win64; 135 136 if (D->hasAttr<SysVABIAttr>()) 137 return IsWindows ? CC_X86_64SysV : CC_C; 138 139 return CC_C; 140 } 141 142 static bool isAAPCSVFP(const CGFunctionInfo &FI, const TargetInfo &Target) { 143 switch (FI.getEffectiveCallingConvention()) { 144 case llvm::CallingConv::C: 145 switch (Target.getTriple().getEnvironment()) { 146 case llvm::Triple::EABIHF: 147 case llvm::Triple::GNUEABIHF: 148 return true; 149 default: 150 return false; 151 } 152 case llvm::CallingConv::ARM_AAPCS_VFP: 153 return true; 154 default: 155 return false; 156 } 157 } 158 159 /// Arrange the argument and result information for a call to an 160 /// unknown C++ non-static member function of the given abstract type. 161 /// (Zero value of RD means we don't have any meaningful "this" argument type, 162 /// so fall back to a generic pointer type). 163 /// The member function must be an ordinary function, i.e. not a 164 /// constructor or destructor. 165 const CGFunctionInfo & 166 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, 167 const FunctionProtoType *FTP) { 168 SmallVector<CanQualType, 16> argTypes; 169 170 // Add the 'this' pointer. 171 if (RD) 172 argTypes.push_back(GetThisType(Context, RD)); 173 else 174 argTypes.push_back(Context.VoidPtrTy); 175 176 return ::arrangeLLVMFunctionInfo( 177 *this, true, argTypes, 178 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>()); 179 } 180 181 /// Arrange the argument and result information for a declaration or 182 /// definition of the given C++ non-static member function. The 183 /// member function must be an ordinary function, i.e. not a 184 /// constructor or destructor. 185 const CGFunctionInfo & 186 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) { 187 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!"); 188 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!"); 189 190 CanQual<FunctionProtoType> prototype = GetFormalType(MD); 191 192 if (MD->isInstance()) { 193 // The abstract case is perfectly fine. 194 const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD); 195 return arrangeCXXMethodType(ThisType, prototype.getTypePtr()); 196 } 197 198 return arrangeFreeFunctionType(prototype); 199 } 200 201 /// Arrange the argument and result information for a declaration 202 /// or definition to the given constructor variant. 203 const CGFunctionInfo & 204 CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D, 205 CXXCtorType ctorKind) { 206 SmallVector<CanQualType, 16> argTypes; 207 argTypes.push_back(GetThisType(Context, D->getParent())); 208 209 GlobalDecl GD(D, ctorKind); 210 CanQualType resultType = 211 TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy; 212 213 CanQual<FunctionProtoType> FTP = GetFormalType(D); 214 215 // Add the formal parameters. 216 for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i) 217 argTypes.push_back(FTP->getParamType(i)); 218 219 TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes); 220 221 RequiredArgs required = 222 (D->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All); 223 224 FunctionType::ExtInfo extInfo = FTP->getExtInfo(); 225 return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo, required); 226 } 227 228 /// Arrange a call to a C++ method, passing the given arguments. 229 const CGFunctionInfo & 230 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args, 231 const CXXConstructorDecl *D, 232 CXXCtorType CtorKind, 233 unsigned ExtraArgs) { 234 // FIXME: Kill copy. 235 SmallVector<CanQualType, 16> ArgTypes; 236 for (const auto &Arg : args) 237 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); 238 239 CanQual<FunctionProtoType> FPT = GetFormalType(D); 240 RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs); 241 GlobalDecl GD(D, CtorKind); 242 CanQualType ResultType = 243 TheCXXABI.HasThisReturn(GD) ? ArgTypes.front() : Context.VoidTy; 244 245 FunctionType::ExtInfo Info = FPT->getExtInfo(); 246 return arrangeLLVMFunctionInfo(ResultType, true, ArgTypes, Info, Required); 247 } 248 249 /// Arrange the argument and result information for a declaration, 250 /// definition, or call to the given destructor variant. It so 251 /// happens that all three cases produce the same information. 252 const CGFunctionInfo & 253 CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D, 254 CXXDtorType dtorKind) { 255 SmallVector<CanQualType, 2> argTypes; 256 argTypes.push_back(GetThisType(Context, D->getParent())); 257 258 GlobalDecl GD(D, dtorKind); 259 CanQualType resultType = 260 TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy; 261 262 TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes); 263 264 CanQual<FunctionProtoType> FTP = GetFormalType(D); 265 assert(FTP->getNumParams() == 0 && "dtor with formal parameters"); 266 assert(FTP->isVariadic() == 0 && "dtor with formal parameters"); 267 268 FunctionType::ExtInfo extInfo = FTP->getExtInfo(); 269 return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo, 270 RequiredArgs::All); 271 } 272 273 /// Arrange the argument and result information for the declaration or 274 /// definition of the given function. 275 const CGFunctionInfo & 276 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { 277 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) 278 if (MD->isInstance()) 279 return arrangeCXXMethodDeclaration(MD); 280 281 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified(); 282 283 assert(isa<FunctionType>(FTy)); 284 285 // When declaring a function without a prototype, always use a 286 // non-variadic type. 287 if (isa<FunctionNoProtoType>(FTy)) { 288 CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>(); 289 return arrangeLLVMFunctionInfo(noProto->getReturnType(), false, None, 290 noProto->getExtInfo(), RequiredArgs::All); 291 } 292 293 assert(isa<FunctionProtoType>(FTy)); 294 return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>()); 295 } 296 297 /// Arrange the argument and result information for the declaration or 298 /// definition of an Objective-C method. 299 const CGFunctionInfo & 300 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) { 301 // It happens that this is the same as a call with no optional 302 // arguments, except also using the formal 'self' type. 303 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType()); 304 } 305 306 /// Arrange the argument and result information for the function type 307 /// through which to perform a send to the given Objective-C method, 308 /// using the given receiver type. The receiver type is not always 309 /// the 'self' type of the method or even an Objective-C pointer type. 310 /// This is *not* the right method for actually performing such a 311 /// message send, due to the possibility of optional arguments. 312 const CGFunctionInfo & 313 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, 314 QualType receiverType) { 315 SmallVector<CanQualType, 16> argTys; 316 argTys.push_back(Context.getCanonicalParamType(receiverType)); 317 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType())); 318 // FIXME: Kill copy? 319 for (const auto *I : MD->params()) { 320 argTys.push_back(Context.getCanonicalParamType(I->getType())); 321 } 322 323 FunctionType::ExtInfo einfo; 324 bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows(); 325 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows)); 326 327 if (getContext().getLangOpts().ObjCAutoRefCount && 328 MD->hasAttr<NSReturnsRetainedAttr>()) 329 einfo = einfo.withProducesResult(true); 330 331 RequiredArgs required = 332 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All); 333 334 return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()), false, 335 argTys, einfo, required); 336 } 337 338 const CGFunctionInfo & 339 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { 340 // FIXME: Do we need to handle ObjCMethodDecl? 341 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); 342 343 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) 344 return arrangeCXXConstructorDeclaration(CD, GD.getCtorType()); 345 346 if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) 347 return arrangeCXXDestructor(DD, GD.getDtorType()); 348 349 return arrangeFunctionDeclaration(FD); 350 } 351 352 /// Arrange a call as unto a free function, except possibly with an 353 /// additional number of formal parameters considered required. 354 static const CGFunctionInfo & 355 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, 356 CodeGenModule &CGM, 357 const CallArgList &args, 358 const FunctionType *fnType, 359 unsigned numExtraRequiredArgs) { 360 assert(args.size() >= numExtraRequiredArgs); 361 362 // In most cases, there are no optional arguments. 363 RequiredArgs required = RequiredArgs::All; 364 365 // If we have a variadic prototype, the required arguments are the 366 // extra prefix plus the arguments in the prototype. 367 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) { 368 if (proto->isVariadic()) 369 required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs); 370 371 // If we don't have a prototype at all, but we're supposed to 372 // explicitly use the variadic convention for unprototyped calls, 373 // treat all of the arguments as required but preserve the nominal 374 // possibility of variadics. 375 } else if (CGM.getTargetCodeGenInfo() 376 .isNoProtoCallVariadic(args, 377 cast<FunctionNoProtoType>(fnType))) { 378 required = RequiredArgs(args.size()); 379 } 380 381 return CGT.arrangeFreeFunctionCall(fnType->getReturnType(), args, 382 fnType->getExtInfo(), required); 383 } 384 385 /// Figure out the rules for calling a function with the given formal 386 /// type using the given arguments. The arguments are necessary 387 /// because the function might be unprototyped, in which case it's 388 /// target-dependent in crazy ways. 389 const CGFunctionInfo & 390 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args, 391 const FunctionType *fnType) { 392 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 0); 393 } 394 395 /// A block function call is essentially a free-function call with an 396 /// extra implicit argument. 397 const CGFunctionInfo & 398 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args, 399 const FunctionType *fnType) { 400 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1); 401 } 402 403 const CGFunctionInfo & 404 CodeGenTypes::arrangeFreeFunctionCall(QualType resultType, 405 const CallArgList &args, 406 FunctionType::ExtInfo info, 407 RequiredArgs required) { 408 // FIXME: Kill copy. 409 SmallVector<CanQualType, 16> argTypes; 410 for (const auto &Arg : args) 411 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); 412 return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes, 413 info, required); 414 } 415 416 /// Arrange a call to a C++ method, passing the given arguments. 417 const CGFunctionInfo & 418 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args, 419 const FunctionProtoType *FPT, 420 RequiredArgs required) { 421 // FIXME: Kill copy. 422 SmallVector<CanQualType, 16> argTypes; 423 for (const auto &Arg : args) 424 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); 425 426 FunctionType::ExtInfo info = FPT->getExtInfo(); 427 return arrangeLLVMFunctionInfo(GetReturnType(FPT->getReturnType()), true, 428 argTypes, info, required); 429 } 430 431 const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration( 432 QualType resultType, const FunctionArgList &args, 433 const FunctionType::ExtInfo &info, bool isVariadic) { 434 // FIXME: Kill copy. 435 SmallVector<CanQualType, 16> argTypes; 436 for (auto Arg : args) 437 argTypes.push_back(Context.getCanonicalParamType(Arg->getType())); 438 439 RequiredArgs required = 440 (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All); 441 return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes, info, 442 required); 443 } 444 445 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() { 446 return arrangeLLVMFunctionInfo(getContext().VoidTy, false, None, 447 FunctionType::ExtInfo(), RequiredArgs::All); 448 } 449 450 /// Arrange the argument and result information for an abstract value 451 /// of a given function type. This is the method which all of the 452 /// above functions ultimately defer to. 453 const CGFunctionInfo & 454 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType, 455 bool IsInstanceMethod, 456 ArrayRef<CanQualType> argTypes, 457 FunctionType::ExtInfo info, 458 RequiredArgs required) { 459 #ifndef NDEBUG 460 for (ArrayRef<CanQualType>::const_iterator 461 I = argTypes.begin(), E = argTypes.end(); I != E; ++I) 462 assert(I->isCanonicalAsParam()); 463 #endif 464 465 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC()); 466 467 // Lookup or create unique function info. 468 llvm::FoldingSetNodeID ID; 469 CGFunctionInfo::Profile(ID, IsInstanceMethod, info, required, resultType, 470 argTypes); 471 472 void *insertPos = nullptr; 473 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos); 474 if (FI) 475 return *FI; 476 477 // Construct the function info. We co-allocate the ArgInfos. 478 FI = CGFunctionInfo::create(CC, IsInstanceMethod, info, resultType, argTypes, 479 required); 480 FunctionInfos.InsertNode(FI, insertPos); 481 482 bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted; 483 assert(inserted && "Recursively being processed?"); 484 485 // Compute ABI information. 486 getABIInfo().computeInfo(*FI); 487 488 // Loop over all of the computed argument and return value info. If any of 489 // them are direct or extend without a specified coerce type, specify the 490 // default now. 491 ABIArgInfo &retInfo = FI->getReturnInfo(); 492 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) 493 retInfo.setCoerceToType(ConvertType(FI->getReturnType())); 494 495 for (auto &I : FI->arguments()) 496 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr) 497 I.info.setCoerceToType(ConvertType(I.type)); 498 499 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased; 500 assert(erased && "Not in set?"); 501 502 return *FI; 503 } 504 505 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, 506 bool IsInstanceMethod, 507 const FunctionType::ExtInfo &info, 508 CanQualType resultType, 509 ArrayRef<CanQualType> argTypes, 510 RequiredArgs required) { 511 void *buffer = operator new(sizeof(CGFunctionInfo) + 512 sizeof(ArgInfo) * (argTypes.size() + 1)); 513 CGFunctionInfo *FI = new(buffer) CGFunctionInfo(); 514 FI->CallingConvention = llvmCC; 515 FI->EffectiveCallingConvention = llvmCC; 516 FI->ASTCallingConvention = info.getCC(); 517 FI->InstanceMethod = IsInstanceMethod; 518 FI->NoReturn = info.getNoReturn(); 519 FI->ReturnsRetained = info.getProducesResult(); 520 FI->Required = required; 521 FI->HasRegParm = info.getHasRegParm(); 522 FI->RegParm = info.getRegParm(); 523 FI->ArgStruct = nullptr; 524 FI->NumArgs = argTypes.size(); 525 FI->getArgsBuffer()[0].type = resultType; 526 for (unsigned i = 0, e = argTypes.size(); i != e; ++i) 527 FI->getArgsBuffer()[i + 1].type = argTypes[i]; 528 return FI; 529 } 530 531 /***/ 532 533 void CodeGenTypes::GetExpandedTypes(QualType type, 534 SmallVectorImpl<llvm::Type*> &expandedTypes) { 535 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) { 536 uint64_t NumElts = AT->getSize().getZExtValue(); 537 for (uint64_t Elt = 0; Elt < NumElts; ++Elt) 538 GetExpandedTypes(AT->getElementType(), expandedTypes); 539 } else if (const RecordType *RT = type->getAs<RecordType>()) { 540 const RecordDecl *RD = RT->getDecl(); 541 assert(!RD->hasFlexibleArrayMember() && 542 "Cannot expand structure with flexible array."); 543 if (RD->isUnion()) { 544 // Unions can be here only in degenerative cases - all the fields are same 545 // after flattening. Thus we have to use the "largest" field. 546 const FieldDecl *LargestFD = nullptr; 547 CharUnits UnionSize = CharUnits::Zero(); 548 549 for (const auto *FD : RD->fields()) { 550 assert(!FD->isBitField() && 551 "Cannot expand structure with bit-field members."); 552 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType()); 553 if (UnionSize < FieldSize) { 554 UnionSize = FieldSize; 555 LargestFD = FD; 556 } 557 } 558 if (LargestFD) 559 GetExpandedTypes(LargestFD->getType(), expandedTypes); 560 } else { 561 for (const auto *I : RD->fields()) { 562 assert(!I->isBitField() && 563 "Cannot expand structure with bit-field members."); 564 GetExpandedTypes(I->getType(), expandedTypes); 565 } 566 } 567 } else if (const ComplexType *CT = type->getAs<ComplexType>()) { 568 llvm::Type *EltTy = ConvertType(CT->getElementType()); 569 expandedTypes.push_back(EltTy); 570 expandedTypes.push_back(EltTy); 571 } else 572 expandedTypes.push_back(ConvertType(type)); 573 } 574 575 void CodeGenFunction::ExpandTypeFromArgs( 576 QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) { 577 assert(LV.isSimple() && 578 "Unexpected non-simple lvalue during struct expansion."); 579 580 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 581 unsigned NumElts = AT->getSize().getZExtValue(); 582 QualType EltTy = AT->getElementType(); 583 for (unsigned Elt = 0; Elt < NumElts; ++Elt) { 584 llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt); 585 LValue LV = MakeAddrLValue(EltAddr, EltTy); 586 ExpandTypeFromArgs(EltTy, LV, AI); 587 } 588 return; 589 } 590 if (const RecordType *RT = Ty->getAs<RecordType>()) { 591 RecordDecl *RD = RT->getDecl(); 592 if (RD->isUnion()) { 593 // Unions can be here only in degenerative cases - all the fields are same 594 // after flattening. Thus we have to use the "largest" field. 595 const FieldDecl *LargestFD = nullptr; 596 CharUnits UnionSize = CharUnits::Zero(); 597 598 for (const auto *FD : RD->fields()) { 599 assert(!FD->isBitField() && 600 "Cannot expand structure with bit-field members."); 601 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType()); 602 if (UnionSize < FieldSize) { 603 UnionSize = FieldSize; 604 LargestFD = FD; 605 } 606 } 607 if (LargestFD) { 608 // FIXME: What are the right qualifiers here? 609 LValue SubLV = EmitLValueForField(LV, LargestFD); 610 ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI); 611 } 612 } else { 613 for (const auto *FD : RD->fields()) { 614 QualType FT = FD->getType(); 615 // FIXME: What are the right qualifiers here? 616 LValue SubLV = EmitLValueForField(LV, FD); 617 ExpandTypeFromArgs(FT, SubLV, AI); 618 } 619 } 620 return; 621 } 622 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 623 QualType EltTy = CT->getElementType(); 624 llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real"); 625 EmitStoreThroughLValue(RValue::get(*AI++), MakeAddrLValue(RealAddr, EltTy)); 626 llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag"); 627 EmitStoreThroughLValue(RValue::get(*AI++), MakeAddrLValue(ImagAddr, EltTy)); 628 return; 629 } 630 EmitStoreThroughLValue(RValue::get(*AI++), LV); 631 } 632 633 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are 634 /// accessing some number of bytes out of it, try to gep into the struct to get 635 /// at its inner goodness. Dive as deep as possible without entering an element 636 /// with an in-memory size smaller than DstSize. 637 static llvm::Value * 638 EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr, 639 llvm::StructType *SrcSTy, 640 uint64_t DstSize, CodeGenFunction &CGF) { 641 // We can't dive into a zero-element struct. 642 if (SrcSTy->getNumElements() == 0) return SrcPtr; 643 644 llvm::Type *FirstElt = SrcSTy->getElementType(0); 645 646 // If the first elt is at least as large as what we're looking for, or if the 647 // first element is the same size as the whole struct, we can enter it. 648 uint64_t FirstEltSize = 649 CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt); 650 if (FirstEltSize < DstSize && 651 FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy)) 652 return SrcPtr; 653 654 // GEP into the first element. 655 SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive"); 656 657 // If the first element is a struct, recurse. 658 llvm::Type *SrcTy = 659 cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); 660 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) 661 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF); 662 663 return SrcPtr; 664 } 665 666 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both 667 /// are either integers or pointers. This does a truncation of the value if it 668 /// is too large or a zero extension if it is too small. 669 /// 670 /// This behaves as if the value were coerced through memory, so on big-endian 671 /// targets the high bits are preserved in a truncation, while little-endian 672 /// targets preserve the low bits. 673 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, 674 llvm::Type *Ty, 675 CodeGenFunction &CGF) { 676 if (Val->getType() == Ty) 677 return Val; 678 679 if (isa<llvm::PointerType>(Val->getType())) { 680 // If this is Pointer->Pointer avoid conversion to and from int. 681 if (isa<llvm::PointerType>(Ty)) 682 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val"); 683 684 // Convert the pointer to an integer so we can play with its width. 685 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi"); 686 } 687 688 llvm::Type *DestIntTy = Ty; 689 if (isa<llvm::PointerType>(DestIntTy)) 690 DestIntTy = CGF.IntPtrTy; 691 692 if (Val->getType() != DestIntTy) { 693 const llvm::DataLayout &DL = CGF.CGM.getDataLayout(); 694 if (DL.isBigEndian()) { 695 // Preserve the high bits on big-endian targets. 696 // That is what memory coercion does. 697 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType()); 698 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy); 699 700 if (SrcSize > DstSize) { 701 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits"); 702 Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii"); 703 } else { 704 Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii"); 705 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits"); 706 } 707 } else { 708 // Little-endian targets preserve the low bits. No shifts required. 709 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii"); 710 } 711 } 712 713 if (isa<llvm::PointerType>(Ty)) 714 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip"); 715 return Val; 716 } 717 718 719 720 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as 721 /// a pointer to an object of type \arg Ty. 722 /// 723 /// This safely handles the case when the src type is smaller than the 724 /// destination type; in this situation the values of bits which not 725 /// present in the src are undefined. 726 static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr, 727 llvm::Type *Ty, 728 CodeGenFunction &CGF) { 729 llvm::Type *SrcTy = 730 cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); 731 732 // If SrcTy and Ty are the same, just do a load. 733 if (SrcTy == Ty) 734 return CGF.Builder.CreateLoad(SrcPtr); 735 736 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty); 737 738 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) { 739 SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF); 740 SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); 741 } 742 743 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); 744 745 // If the source and destination are integer or pointer types, just do an 746 // extension or truncation to the desired type. 747 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) && 748 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) { 749 llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr); 750 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF); 751 } 752 753 // If load is legal, just bitcast the src pointer. 754 if (SrcSize >= DstSize) { 755 // Generally SrcSize is never greater than DstSize, since this means we are 756 // losing bits. However, this can happen in cases where the structure has 757 // additional padding, for example due to a user specified alignment. 758 // 759 // FIXME: Assert that we aren't truncating non-padding bits when have access 760 // to that information. 761 llvm::Value *Casted = 762 CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty)); 763 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted); 764 // FIXME: Use better alignment / avoid requiring aligned load. 765 Load->setAlignment(1); 766 return Load; 767 } 768 769 // Otherwise do coercion through memory. This is stupid, but 770 // simple. 771 llvm::Value *Tmp = CGF.CreateTempAlloca(Ty); 772 llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy(); 773 llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy); 774 llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy); 775 // FIXME: Use better alignment. 776 CGF.Builder.CreateMemCpy(Casted, SrcCasted, 777 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize), 778 1, false); 779 return CGF.Builder.CreateLoad(Tmp); 780 } 781 782 // Function to store a first-class aggregate into memory. We prefer to 783 // store the elements rather than the aggregate to be more friendly to 784 // fast-isel. 785 // FIXME: Do we need to recurse here? 786 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val, 787 llvm::Value *DestPtr, bool DestIsVolatile, 788 bool LowAlignment) { 789 // Prefer scalar stores to first-class aggregate stores. 790 if (llvm::StructType *STy = 791 dyn_cast<llvm::StructType>(Val->getType())) { 792 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 793 llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i); 794 llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i); 795 llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr, 796 DestIsVolatile); 797 if (LowAlignment) 798 SI->setAlignment(1); 799 } 800 } else { 801 llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile); 802 if (LowAlignment) 803 SI->setAlignment(1); 804 } 805 } 806 807 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src, 808 /// where the source and destination may have different types. 809 /// 810 /// This safely handles the case when the src type is larger than the 811 /// destination type; the upper bits of the src will be lost. 812 static void CreateCoercedStore(llvm::Value *Src, 813 llvm::Value *DstPtr, 814 bool DstIsVolatile, 815 CodeGenFunction &CGF) { 816 llvm::Type *SrcTy = Src->getType(); 817 llvm::Type *DstTy = 818 cast<llvm::PointerType>(DstPtr->getType())->getElementType(); 819 if (SrcTy == DstTy) { 820 CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile); 821 return; 822 } 823 824 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); 825 826 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) { 827 DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF); 828 DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType(); 829 } 830 831 // If the source and destination are integer or pointer types, just do an 832 // extension or truncation to the desired type. 833 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) && 834 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) { 835 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF); 836 CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile); 837 return; 838 } 839 840 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy); 841 842 // If store is legal, just bitcast the src pointer. 843 if (SrcSize <= DstSize) { 844 llvm::Value *Casted = 845 CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy)); 846 // FIXME: Use better alignment / avoid requiring aligned store. 847 BuildAggStore(CGF, Src, Casted, DstIsVolatile, true); 848 } else { 849 // Otherwise do coercion through memory. This is stupid, but 850 // simple. 851 852 // Generally SrcSize is never greater than DstSize, since this means we are 853 // losing bits. However, this can happen in cases where the structure has 854 // additional padding, for example due to a user specified alignment. 855 // 856 // FIXME: Assert that we aren't truncating non-padding bits when have access 857 // to that information. 858 llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy); 859 CGF.Builder.CreateStore(Src, Tmp); 860 llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy(); 861 llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy); 862 llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy); 863 // FIXME: Use better alignment. 864 CGF.Builder.CreateMemCpy(DstCasted, Casted, 865 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize), 866 1, false); 867 } 868 } 869 870 /***/ 871 872 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { 873 return FI.getReturnInfo().isIndirect(); 874 } 875 876 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) { 877 return ReturnTypeUsesSRet(FI) && 878 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs(); 879 } 880 881 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { 882 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { 883 switch (BT->getKind()) { 884 default: 885 return false; 886 case BuiltinType::Float: 887 return getTarget().useObjCFPRetForRealType(TargetInfo::Float); 888 case BuiltinType::Double: 889 return getTarget().useObjCFPRetForRealType(TargetInfo::Double); 890 case BuiltinType::LongDouble: 891 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble); 892 } 893 } 894 895 return false; 896 } 897 898 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { 899 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { 900 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { 901 if (BT->getKind() == BuiltinType::LongDouble) 902 return getTarget().useObjCFP2RetForComplexLongDouble(); 903 } 904 } 905 906 return false; 907 } 908 909 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { 910 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); 911 return GetFunctionType(FI); 912 } 913 914 llvm::FunctionType * 915 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { 916 917 bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted; 918 assert(Inserted && "Recursively being processed?"); 919 920 bool SwapThisWithSRet = false; 921 SmallVector<llvm::Type*, 8> argTypes; 922 llvm::Type *resultType = nullptr; 923 924 const ABIArgInfo &retAI = FI.getReturnInfo(); 925 switch (retAI.getKind()) { 926 case ABIArgInfo::Expand: 927 llvm_unreachable("Invalid ABI kind for return argument"); 928 929 case ABIArgInfo::Extend: 930 case ABIArgInfo::Direct: 931 resultType = retAI.getCoerceToType(); 932 break; 933 934 case ABIArgInfo::InAlloca: 935 if (retAI.getInAllocaSRet()) { 936 // sret things on win32 aren't void, they return the sret pointer. 937 QualType ret = FI.getReturnType(); 938 llvm::Type *ty = ConvertType(ret); 939 unsigned addressSpace = Context.getTargetAddressSpace(ret); 940 resultType = llvm::PointerType::get(ty, addressSpace); 941 } else { 942 resultType = llvm::Type::getVoidTy(getLLVMContext()); 943 } 944 break; 945 946 case ABIArgInfo::Indirect: { 947 assert(!retAI.getIndirectAlign() && "Align unused on indirect return."); 948 resultType = llvm::Type::getVoidTy(getLLVMContext()); 949 950 QualType ret = FI.getReturnType(); 951 llvm::Type *ty = ConvertType(ret); 952 unsigned addressSpace = Context.getTargetAddressSpace(ret); 953 argTypes.push_back(llvm::PointerType::get(ty, addressSpace)); 954 955 SwapThisWithSRet = retAI.isSRetAfterThis(); 956 break; 957 } 958 959 case ABIArgInfo::Ignore: 960 resultType = llvm::Type::getVoidTy(getLLVMContext()); 961 break; 962 } 963 964 // Add in all of the required arguments. 965 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie; 966 if (FI.isVariadic()) { 967 ie = it + FI.getRequiredArgs().getNumRequiredArgs(); 968 } else { 969 ie = FI.arg_end(); 970 } 971 for (; it != ie; ++it) { 972 const ABIArgInfo &argAI = it->info; 973 974 // Insert a padding type to ensure proper alignment. 975 if (llvm::Type *PaddingType = argAI.getPaddingType()) 976 argTypes.push_back(PaddingType); 977 978 switch (argAI.getKind()) { 979 case ABIArgInfo::Ignore: 980 case ABIArgInfo::InAlloca: 981 break; 982 983 case ABIArgInfo::Indirect: { 984 // indirect arguments are always on the stack, which is addr space #0. 985 llvm::Type *LTy = ConvertTypeForMem(it->type); 986 argTypes.push_back(LTy->getPointerTo()); 987 break; 988 } 989 990 case ABIArgInfo::Extend: 991 case ABIArgInfo::Direct: { 992 // If the coerce-to type is a first class aggregate, flatten it. Either 993 // way is semantically identical, but fast-isel and the optimizer 994 // generally likes scalar values better than FCAs. 995 // We cannot do this for functions using the AAPCS calling convention, 996 // as structures are treated differently by that calling convention. 997 llvm::Type *argType = argAI.getCoerceToType(); 998 llvm::StructType *st = dyn_cast<llvm::StructType>(argType); 999 if (st && !isAAPCSVFP(FI, getTarget())) { 1000 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) 1001 argTypes.push_back(st->getElementType(i)); 1002 } else { 1003 argTypes.push_back(argType); 1004 } 1005 break; 1006 } 1007 1008 case ABIArgInfo::Expand: 1009 GetExpandedTypes(it->type, argTypes); 1010 break; 1011 } 1012 } 1013 1014 // Add the inalloca struct as the last parameter type. 1015 if (llvm::StructType *ArgStruct = FI.getArgStruct()) 1016 argTypes.push_back(ArgStruct->getPointerTo()); 1017 1018 if (SwapThisWithSRet) 1019 std::swap(argTypes[0], argTypes[1]); 1020 1021 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; 1022 assert(Erased && "Not in set?"); 1023 1024 return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic()); 1025 } 1026 1027 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { 1028 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 1029 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 1030 1031 if (!isFuncTypeConvertible(FPT)) 1032 return llvm::StructType::get(getLLVMContext()); 1033 1034 const CGFunctionInfo *Info; 1035 if (isa<CXXDestructorDecl>(MD)) 1036 Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType()); 1037 else 1038 Info = &arrangeCXXMethodDeclaration(MD); 1039 return GetFunctionType(*Info); 1040 } 1041 1042 namespace { 1043 1044 /// Encapsulates information about the way function arguments from 1045 /// CGFunctionInfo should be passed to actual LLVM IR function. 1046 class ClangToLLVMArgMapping { 1047 static const unsigned InvalidIndex = ~0U; 1048 unsigned InallocaArgNo; 1049 unsigned SRetArgNo; 1050 unsigned TotalIRArgs; 1051 1052 /// Arguments of LLVM IR function corresponding to single Clang argument. 1053 struct IRArgs { 1054 unsigned PaddingArgIndex; 1055 // Argument is expanded to IR arguments at positions 1056 // [FirstArgIndex, FirstArgIndex + NumberOfArgs). 1057 unsigned FirstArgIndex; 1058 unsigned NumberOfArgs; 1059 1060 IRArgs() 1061 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex), 1062 NumberOfArgs(0) {} 1063 }; 1064 1065 SmallVector<IRArgs, 8> ArgInfo; 1066 1067 public: 1068 ClangToLLVMArgMapping(CodeGenModule &CGM, const CGFunctionInfo &FI) 1069 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0), 1070 ArgInfo(FI.arg_size()) { 1071 construct(CGM, FI); 1072 } 1073 1074 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; } 1075 unsigned getInallocaArgNo() const { 1076 assert(hasInallocaArg()); 1077 return InallocaArgNo; 1078 } 1079 1080 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; } 1081 unsigned getSRetArgNo() const { 1082 assert(hasSRetArg()); 1083 return SRetArgNo; 1084 } 1085 1086 unsigned totalIRArgs() const { return TotalIRArgs; } 1087 1088 bool hasPaddingArg(unsigned ArgNo) const { 1089 assert(ArgNo < ArgInfo.size()); 1090 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; 1091 } 1092 unsigned getPaddingArgNo(unsigned ArgNo) const { 1093 assert(hasPaddingArg(ArgNo)); 1094 return ArgInfo[ArgNo].PaddingArgIndex; 1095 } 1096 1097 /// Returns index of first IR argument corresponding to ArgNo, and their 1098 /// quantity. 1099 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const { 1100 assert(ArgNo < ArgInfo.size()); 1101 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, 1102 ArgInfo[ArgNo].NumberOfArgs); 1103 } 1104 1105 private: 1106 void construct(CodeGenModule &CGM, const CGFunctionInfo &FI); 1107 }; 1108 1109 void ClangToLLVMArgMapping::construct(CodeGenModule &CGM, 1110 const CGFunctionInfo &FI) { 1111 unsigned IRArgNo = 0; 1112 bool SwapThisWithSRet = false; 1113 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1114 1115 if (RetAI.getKind() == ABIArgInfo::Indirect) { 1116 SwapThisWithSRet = RetAI.isSRetAfterThis(); 1117 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++; 1118 } 1119 1120 unsigned ArgNo = 0; 1121 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 1122 E = FI.arg_end(); 1123 I != E; ++I, ++ArgNo) { 1124 QualType ArgType = I->type; 1125 const ABIArgInfo &AI = I->info; 1126 // Collect data about IR arguments corresponding to Clang argument ArgNo. 1127 auto &IRArgs = ArgInfo[ArgNo]; 1128 1129 if (AI.getPaddingType()) 1130 IRArgs.PaddingArgIndex = IRArgNo++; 1131 1132 switch (AI.getKind()) { 1133 case ABIArgInfo::Extend: 1134 case ABIArgInfo::Direct: { 1135 // FIXME: handle sseregparm someday... 1136 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType()); 1137 if (!isAAPCSVFP(FI, CGM.getTarget()) && STy) { 1138 IRArgs.NumberOfArgs = STy->getNumElements(); 1139 } else { 1140 IRArgs.NumberOfArgs = 1; 1141 } 1142 break; 1143 } 1144 case ABIArgInfo::Indirect: 1145 IRArgs.NumberOfArgs = 1; 1146 break; 1147 case ABIArgInfo::Ignore: 1148 case ABIArgInfo::InAlloca: 1149 // ignore and inalloca doesn't have matching LLVM parameters. 1150 IRArgs.NumberOfArgs = 0; 1151 break; 1152 case ABIArgInfo::Expand: { 1153 SmallVector<llvm::Type*, 8> Types; 1154 // FIXME: This is rather inefficient. Do we ever actually need to do 1155 // anything here? The result should be just reconstructed on the other 1156 // side, so extension should be a non-issue. 1157 CGM.getTypes().GetExpandedTypes(ArgType, Types); 1158 IRArgs.NumberOfArgs = Types.size(); 1159 break; 1160 } 1161 } 1162 1163 if (IRArgs.NumberOfArgs > 0) { 1164 IRArgs.FirstArgIndex = IRArgNo; 1165 IRArgNo += IRArgs.NumberOfArgs; 1166 } 1167 1168 // Skip over the sret parameter when it comes second. We already handled it 1169 // above. 1170 if (IRArgNo == 1 && SwapThisWithSRet) 1171 IRArgNo++; 1172 } 1173 assert(ArgNo == FI.arg_size()); 1174 1175 if (FI.usesInAlloca()) 1176 InallocaArgNo = IRArgNo++; 1177 1178 TotalIRArgs = IRArgNo; 1179 } 1180 } // namespace 1181 1182 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, 1183 const Decl *TargetDecl, 1184 AttributeListType &PAL, 1185 unsigned &CallingConv, 1186 bool AttrOnCallSite) { 1187 llvm::AttrBuilder FuncAttrs; 1188 llvm::AttrBuilder RetAttrs; 1189 1190 CallingConv = FI.getEffectiveCallingConvention(); 1191 1192 if (FI.isNoReturn()) 1193 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1194 1195 // FIXME: handle sseregparm someday... 1196 if (TargetDecl) { 1197 if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) 1198 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice); 1199 if (TargetDecl->hasAttr<NoThrowAttr>()) 1200 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1201 if (TargetDecl->hasAttr<NoReturnAttr>()) 1202 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1203 if (TargetDecl->hasAttr<NoDuplicateAttr>()) 1204 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate); 1205 1206 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { 1207 const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>(); 1208 if (FPT && FPT->isNothrow(getContext())) 1209 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1210 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function. 1211 // These attributes are not inherited by overloads. 1212 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn); 1213 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual())) 1214 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1215 } 1216 1217 // 'const' and 'pure' attribute functions are also nounwind. 1218 if (TargetDecl->hasAttr<ConstAttr>()) { 1219 FuncAttrs.addAttribute(llvm::Attribute::ReadNone); 1220 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1221 } else if (TargetDecl->hasAttr<PureAttr>()) { 1222 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly); 1223 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1224 } 1225 if (TargetDecl->hasAttr<MallocAttr>()) 1226 RetAttrs.addAttribute(llvm::Attribute::NoAlias); 1227 if (TargetDecl->hasAttr<ReturnsNonNullAttr>()) 1228 RetAttrs.addAttribute(llvm::Attribute::NonNull); 1229 } 1230 1231 if (CodeGenOpts.OptimizeSize) 1232 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); 1233 if (CodeGenOpts.OptimizeSize == 2) 1234 FuncAttrs.addAttribute(llvm::Attribute::MinSize); 1235 if (CodeGenOpts.DisableRedZone) 1236 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); 1237 if (CodeGenOpts.NoImplicitFloat) 1238 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); 1239 if (CodeGenOpts.EnableSegmentedStacks && 1240 !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>())) 1241 FuncAttrs.addAttribute("split-stack"); 1242 1243 if (AttrOnCallSite) { 1244 // Attributes that should go on the call site only. 1245 if (!CodeGenOpts.SimplifyLibCalls) 1246 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); 1247 } else { 1248 // Attributes that should go on the function, but not the call site. 1249 if (!CodeGenOpts.DisableFPElim) { 1250 FuncAttrs.addAttribute("no-frame-pointer-elim", "false"); 1251 } else if (CodeGenOpts.OmitLeafFramePointer) { 1252 FuncAttrs.addAttribute("no-frame-pointer-elim", "false"); 1253 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf"); 1254 } else { 1255 FuncAttrs.addAttribute("no-frame-pointer-elim", "true"); 1256 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf"); 1257 } 1258 1259 FuncAttrs.addAttribute("less-precise-fpmad", 1260 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD)); 1261 FuncAttrs.addAttribute("no-infs-fp-math", 1262 llvm::toStringRef(CodeGenOpts.NoInfsFPMath)); 1263 FuncAttrs.addAttribute("no-nans-fp-math", 1264 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath)); 1265 FuncAttrs.addAttribute("unsafe-fp-math", 1266 llvm::toStringRef(CodeGenOpts.UnsafeFPMath)); 1267 FuncAttrs.addAttribute("use-soft-float", 1268 llvm::toStringRef(CodeGenOpts.SoftFloat)); 1269 FuncAttrs.addAttribute("stack-protector-buffer-size", 1270 llvm::utostr(CodeGenOpts.SSPBufferSize)); 1271 1272 if (!CodeGenOpts.StackRealignment) 1273 FuncAttrs.addAttribute("no-realign-stack"); 1274 } 1275 1276 ClangToLLVMArgMapping IRFunctionArgs(*this, FI); 1277 1278 QualType RetTy = FI.getReturnType(); 1279 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1280 switch (RetAI.getKind()) { 1281 case ABIArgInfo::Extend: 1282 if (RetTy->hasSignedIntegerRepresentation()) 1283 RetAttrs.addAttribute(llvm::Attribute::SExt); 1284 else if (RetTy->hasUnsignedIntegerRepresentation()) 1285 RetAttrs.addAttribute(llvm::Attribute::ZExt); 1286 // FALL THROUGH 1287 case ABIArgInfo::Direct: 1288 if (RetAI.getInReg()) 1289 RetAttrs.addAttribute(llvm::Attribute::InReg); 1290 break; 1291 case ABIArgInfo::Ignore: 1292 break; 1293 1294 case ABIArgInfo::InAlloca: 1295 case ABIArgInfo::Indirect: { 1296 // inalloca and sret disable readnone and readonly 1297 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1298 .removeAttribute(llvm::Attribute::ReadNone); 1299 break; 1300 } 1301 1302 case ABIArgInfo::Expand: 1303 llvm_unreachable("Invalid ABI kind for return argument"); 1304 } 1305 1306 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { 1307 QualType PTy = RefTy->getPointeeType(); 1308 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 1309 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 1310 .getQuantity()); 1311 else if (getContext().getTargetAddressSpace(PTy) == 0) 1312 RetAttrs.addAttribute(llvm::Attribute::NonNull); 1313 } 1314 1315 // Attach return attributes. 1316 if (RetAttrs.hasAttributes()) { 1317 PAL.push_back(llvm::AttributeSet::get( 1318 getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs)); 1319 } 1320 1321 // Attach attributes to sret. 1322 if (IRFunctionArgs.hasSRetArg()) { 1323 llvm::AttrBuilder SRETAttrs; 1324 SRETAttrs.addAttribute(llvm::Attribute::StructRet); 1325 if (RetAI.getInReg()) 1326 SRETAttrs.addAttribute(llvm::Attribute::InReg); 1327 PAL.push_back(llvm::AttributeSet::get( 1328 getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs)); 1329 } 1330 1331 // Attach attributes to inalloca argument. 1332 if (IRFunctionArgs.hasInallocaArg()) { 1333 llvm::AttrBuilder Attrs; 1334 Attrs.addAttribute(llvm::Attribute::InAlloca); 1335 PAL.push_back(llvm::AttributeSet::get( 1336 getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs)); 1337 } 1338 1339 1340 unsigned ArgNo = 0; 1341 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 1342 E = FI.arg_end(); 1343 I != E; ++I, ++ArgNo) { 1344 QualType ParamType = I->type; 1345 const ABIArgInfo &AI = I->info; 1346 llvm::AttrBuilder Attrs; 1347 1348 // Add attribute for padding argument, if necessary. 1349 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 1350 if (AI.getPaddingInReg()) 1351 PAL.push_back(llvm::AttributeSet::get( 1352 getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1, 1353 llvm::Attribute::InReg)); 1354 } 1355 1356 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 1357 // have the corresponding parameter variable. It doesn't make 1358 // sense to do it here because parameters are so messed up. 1359 switch (AI.getKind()) { 1360 case ABIArgInfo::Extend: 1361 if (ParamType->isSignedIntegerOrEnumerationType()) 1362 Attrs.addAttribute(llvm::Attribute::SExt); 1363 else if (ParamType->isUnsignedIntegerOrEnumerationType()) 1364 Attrs.addAttribute(llvm::Attribute::ZExt); 1365 // FALL THROUGH 1366 case ABIArgInfo::Direct: 1367 if (AI.getInReg()) 1368 Attrs.addAttribute(llvm::Attribute::InReg); 1369 break; 1370 1371 case ABIArgInfo::Indirect: 1372 if (AI.getInReg()) 1373 Attrs.addAttribute(llvm::Attribute::InReg); 1374 1375 if (AI.getIndirectByVal()) 1376 Attrs.addAttribute(llvm::Attribute::ByVal); 1377 1378 Attrs.addAlignmentAttr(AI.getIndirectAlign()); 1379 1380 // byval disables readnone and readonly. 1381 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1382 .removeAttribute(llvm::Attribute::ReadNone); 1383 break; 1384 1385 case ABIArgInfo::Ignore: 1386 case ABIArgInfo::Expand: 1387 continue; 1388 1389 case ABIArgInfo::InAlloca: 1390 // inalloca disables readnone and readonly. 1391 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1392 .removeAttribute(llvm::Attribute::ReadNone); 1393 continue; 1394 } 1395 1396 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { 1397 QualType PTy = RefTy->getPointeeType(); 1398 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 1399 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 1400 .getQuantity()); 1401 else if (getContext().getTargetAddressSpace(PTy) == 0) 1402 Attrs.addAttribute(llvm::Attribute::NonNull); 1403 } 1404 1405 if (Attrs.hasAttributes()) { 1406 unsigned FirstIRArg, NumIRArgs; 1407 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1408 for (unsigned i = 0; i < NumIRArgs; i++) 1409 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), 1410 FirstIRArg + i + 1, Attrs)); 1411 } 1412 } 1413 assert(ArgNo == FI.arg_size()); 1414 1415 if (FuncAttrs.hasAttributes()) 1416 PAL.push_back(llvm:: 1417 AttributeSet::get(getLLVMContext(), 1418 llvm::AttributeSet::FunctionIndex, 1419 FuncAttrs)); 1420 } 1421 1422 /// An argument came in as a promoted argument; demote it back to its 1423 /// declared type. 1424 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 1425 const VarDecl *var, 1426 llvm::Value *value) { 1427 llvm::Type *varType = CGF.ConvertType(var->getType()); 1428 1429 // This can happen with promotions that actually don't change the 1430 // underlying type, like the enum promotions. 1431 if (value->getType() == varType) return value; 1432 1433 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 1434 && "unexpected promotion type"); 1435 1436 if (isa<llvm::IntegerType>(varType)) 1437 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 1438 1439 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 1440 } 1441 1442 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, 1443 llvm::Function *Fn, 1444 const FunctionArgList &Args) { 1445 // If this is an implicit-return-zero function, go ahead and 1446 // initialize the return value. TODO: it might be nice to have 1447 // a more general mechanism for this that didn't require synthesized 1448 // return statements. 1449 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) { 1450 if (FD->hasImplicitReturnZero()) { 1451 QualType RetTy = FD->getReturnType().getUnqualifiedType(); 1452 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy); 1453 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy); 1454 Builder.CreateStore(Zero, ReturnValue); 1455 } 1456 } 1457 1458 // FIXME: We no longer need the types from FunctionArgList; lift up and 1459 // simplify. 1460 1461 ClangToLLVMArgMapping IRFunctionArgs(CGM, FI); 1462 // Flattened function arguments. 1463 SmallVector<llvm::Argument *, 16> FnArgs; 1464 FnArgs.reserve(IRFunctionArgs.totalIRArgs()); 1465 for (auto &Arg : Fn->args()) { 1466 FnArgs.push_back(&Arg); 1467 } 1468 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs()); 1469 1470 // If we're using inalloca, all the memory arguments are GEPs off of the last 1471 // parameter, which is a pointer to the complete memory area. 1472 llvm::Value *ArgStruct = nullptr; 1473 if (IRFunctionArgs.hasInallocaArg()) { 1474 ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()]; 1475 assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo()); 1476 } 1477 1478 // Name the struct return parameter. 1479 if (IRFunctionArgs.hasSRetArg()) { 1480 auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()]; 1481 AI->setName("agg.result"); 1482 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1, 1483 llvm::Attribute::NoAlias)); 1484 } 1485 1486 // Get the function-level nonnull attribute if it exists. 1487 const NonNullAttr *NNAtt = 1488 CurCodeDecl ? CurCodeDecl->getAttr<NonNullAttr>() : nullptr; 1489 1490 // Track if we received the parameter as a pointer (indirect, byval, or 1491 // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it 1492 // into a local alloca for us. 1493 enum ValOrPointer { HaveValue = 0, HavePointer = 1 }; 1494 typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr; 1495 SmallVector<ValueAndIsPtr, 16> ArgVals; 1496 ArgVals.reserve(Args.size()); 1497 1498 // Create a pointer value for every parameter declaration. This usually 1499 // entails copying one or more LLVM IR arguments into an alloca. Don't push 1500 // any cleanups or do anything that might unwind. We do that separately, so 1501 // we can push the cleanups in the correct order for the ABI. 1502 assert(FI.arg_size() == Args.size() && 1503 "Mismatch between function signature & arguments."); 1504 unsigned ArgNo = 0; 1505 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); 1506 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 1507 i != e; ++i, ++info_it, ++ArgNo) { 1508 const VarDecl *Arg = *i; 1509 QualType Ty = info_it->type; 1510 const ABIArgInfo &ArgI = info_it->info; 1511 1512 bool isPromoted = 1513 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted(); 1514 1515 unsigned FirstIRArg, NumIRArgs; 1516 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1517 1518 switch (ArgI.getKind()) { 1519 case ABIArgInfo::InAlloca: { 1520 assert(NumIRArgs == 0); 1521 llvm::Value *V = Builder.CreateStructGEP( 1522 ArgStruct, ArgI.getInAllocaFieldIndex(), Arg->getName()); 1523 ArgVals.push_back(ValueAndIsPtr(V, HavePointer)); 1524 break; 1525 } 1526 1527 case ABIArgInfo::Indirect: { 1528 assert(NumIRArgs == 1); 1529 llvm::Value *V = FnArgs[FirstIRArg]; 1530 1531 if (!hasScalarEvaluationKind(Ty)) { 1532 // Aggregates and complex variables are accessed by reference. All we 1533 // need to do is realign the value, if requested 1534 if (ArgI.getIndirectRealign()) { 1535 llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce"); 1536 1537 // Copy from the incoming argument pointer to the temporary with the 1538 // appropriate alignment. 1539 // 1540 // FIXME: We should have a common utility for generating an aggregate 1541 // copy. 1542 llvm::Type *I8PtrTy = Builder.getInt8PtrTy(); 1543 CharUnits Size = getContext().getTypeSizeInChars(Ty); 1544 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy); 1545 llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy); 1546 Builder.CreateMemCpy(Dst, 1547 Src, 1548 llvm::ConstantInt::get(IntPtrTy, 1549 Size.getQuantity()), 1550 ArgI.getIndirectAlign(), 1551 false); 1552 V = AlignedTemp; 1553 } 1554 ArgVals.push_back(ValueAndIsPtr(V, HavePointer)); 1555 } else { 1556 // Load scalar value from indirect argument. 1557 CharUnits Alignment = getContext().getTypeAlignInChars(Ty); 1558 V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty, 1559 Arg->getLocStart()); 1560 1561 if (isPromoted) 1562 V = emitArgumentDemotion(*this, Arg, V); 1563 ArgVals.push_back(ValueAndIsPtr(V, HaveValue)); 1564 } 1565 break; 1566 } 1567 1568 case ABIArgInfo::Extend: 1569 case ABIArgInfo::Direct: { 1570 1571 // If we have the trivial case, handle it with no muss and fuss. 1572 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 1573 ArgI.getCoerceToType() == ConvertType(Ty) && 1574 ArgI.getDirectOffset() == 0) { 1575 assert(NumIRArgs == 1); 1576 auto AI = FnArgs[FirstIRArg]; 1577 llvm::Value *V = AI; 1578 1579 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) { 1580 if ((NNAtt && NNAtt->isNonNull(PVD->getFunctionScopeIndex())) || 1581 PVD->hasAttr<NonNullAttr>()) 1582 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1583 AI->getArgNo() + 1, 1584 llvm::Attribute::NonNull)); 1585 1586 QualType OTy = PVD->getOriginalType(); 1587 if (const auto *ArrTy = 1588 getContext().getAsConstantArrayType(OTy)) { 1589 // A C99 array parameter declaration with the static keyword also 1590 // indicates dereferenceability, and if the size is constant we can 1591 // use the dereferenceable attribute (which requires the size in 1592 // bytes). 1593 if (ArrTy->getSizeModifier() == ArrayType::Static) { 1594 QualType ETy = ArrTy->getElementType(); 1595 uint64_t ArrSize = ArrTy->getSize().getZExtValue(); 1596 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && 1597 ArrSize) { 1598 llvm::AttrBuilder Attrs; 1599 Attrs.addDereferenceableAttr( 1600 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize); 1601 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1602 AI->getArgNo() + 1, Attrs)); 1603 } else if (getContext().getTargetAddressSpace(ETy) == 0) { 1604 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1605 AI->getArgNo() + 1, 1606 llvm::Attribute::NonNull)); 1607 } 1608 } 1609 } else if (const auto *ArrTy = 1610 getContext().getAsVariableArrayType(OTy)) { 1611 // For C99 VLAs with the static keyword, we don't know the size so 1612 // we can't use the dereferenceable attribute, but in addrspace(0) 1613 // we know that it must be nonnull. 1614 if (ArrTy->getSizeModifier() == VariableArrayType::Static && 1615 !getContext().getTargetAddressSpace(ArrTy->getElementType())) 1616 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1617 AI->getArgNo() + 1, 1618 llvm::Attribute::NonNull)); 1619 } 1620 } 1621 1622 if (Arg->getType().isRestrictQualified()) 1623 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 1624 AI->getArgNo() + 1, 1625 llvm::Attribute::NoAlias)); 1626 1627 // Ensure the argument is the correct type. 1628 if (V->getType() != ArgI.getCoerceToType()) 1629 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 1630 1631 if (isPromoted) 1632 V = emitArgumentDemotion(*this, Arg, V); 1633 1634 if (const CXXMethodDecl *MD = 1635 dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) { 1636 if (MD->isVirtual() && Arg == CXXABIThisDecl) 1637 V = CGM.getCXXABI(). 1638 adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V); 1639 } 1640 1641 // Because of merging of function types from multiple decls it is 1642 // possible for the type of an argument to not match the corresponding 1643 // type in the function type. Since we are codegening the callee 1644 // in here, add a cast to the argument type. 1645 llvm::Type *LTy = ConvertType(Arg->getType()); 1646 if (V->getType() != LTy) 1647 V = Builder.CreateBitCast(V, LTy); 1648 1649 ArgVals.push_back(ValueAndIsPtr(V, HaveValue)); 1650 break; 1651 } 1652 1653 llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName()); 1654 1655 // The alignment we need to use is the max of the requested alignment for 1656 // the argument plus the alignment required by our access code below. 1657 unsigned AlignmentToUse = 1658 CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType()); 1659 AlignmentToUse = std::max(AlignmentToUse, 1660 (unsigned)getContext().getDeclAlign(Arg).getQuantity()); 1661 1662 Alloca->setAlignment(AlignmentToUse); 1663 llvm::Value *V = Alloca; 1664 llvm::Value *Ptr = V; // Pointer to store into. 1665 1666 // If the value is offset in memory, apply the offset now. 1667 if (unsigned Offs = ArgI.getDirectOffset()) { 1668 Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy()); 1669 Ptr = Builder.CreateConstGEP1_32(Ptr, Offs); 1670 Ptr = Builder.CreateBitCast(Ptr, 1671 llvm::PointerType::getUnqual(ArgI.getCoerceToType())); 1672 } 1673 1674 // If the coerce-to type is a first class aggregate, we flatten it and 1675 // pass the elements. Either way is semantically identical, but fast-isel 1676 // and the optimizer generally likes scalar values better than FCAs. 1677 // We cannot do this for functions using the AAPCS calling convention, 1678 // as structures are treated differently by that calling convention. 1679 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 1680 if (!isAAPCSVFP(FI, getTarget()) && STy && STy->getNumElements() > 1) { 1681 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 1682 llvm::Type *DstTy = 1683 cast<llvm::PointerType>(Ptr->getType())->getElementType(); 1684 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 1685 1686 if (SrcSize <= DstSize) { 1687 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy)); 1688 1689 assert(STy->getNumElements() == NumIRArgs); 1690 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1691 auto AI = FnArgs[FirstIRArg + i]; 1692 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 1693 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i); 1694 Builder.CreateStore(AI, EltPtr); 1695 } 1696 } else { 1697 llvm::AllocaInst *TempAlloca = 1698 CreateTempAlloca(ArgI.getCoerceToType(), "coerce"); 1699 TempAlloca->setAlignment(AlignmentToUse); 1700 llvm::Value *TempV = TempAlloca; 1701 1702 assert(STy->getNumElements() == NumIRArgs); 1703 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1704 auto AI = FnArgs[FirstIRArg + i]; 1705 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 1706 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i); 1707 Builder.CreateStore(AI, EltPtr); 1708 } 1709 1710 Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse); 1711 } 1712 } else { 1713 // Simple case, just do a coerced store of the argument into the alloca. 1714 assert(NumIRArgs == 1); 1715 auto AI = FnArgs[FirstIRArg]; 1716 AI->setName(Arg->getName() + ".coerce"); 1717 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this); 1718 } 1719 1720 1721 // Match to what EmitParmDecl is expecting for this type. 1722 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 1723 V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart()); 1724 if (isPromoted) 1725 V = emitArgumentDemotion(*this, Arg, V); 1726 ArgVals.push_back(ValueAndIsPtr(V, HaveValue)); 1727 } else { 1728 ArgVals.push_back(ValueAndIsPtr(V, HavePointer)); 1729 } 1730 break; 1731 } 1732 1733 case ABIArgInfo::Expand: { 1734 // If this structure was expanded into multiple arguments then 1735 // we need to create a temporary and reconstruct it from the 1736 // arguments. 1737 llvm::AllocaInst *Alloca = CreateMemTemp(Ty); 1738 CharUnits Align = getContext().getDeclAlign(Arg); 1739 Alloca->setAlignment(Align.getQuantity()); 1740 LValue LV = MakeAddrLValue(Alloca, Ty, Align); 1741 ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer)); 1742 1743 auto FnArgIter = FnArgs.begin() + FirstIRArg; 1744 ExpandTypeFromArgs(Ty, LV, FnArgIter); 1745 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs); 1746 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { 1747 auto AI = FnArgs[FirstIRArg + i]; 1748 AI->setName(Arg->getName() + "." + Twine(i)); 1749 } 1750 break; 1751 } 1752 1753 case ABIArgInfo::Ignore: 1754 assert(NumIRArgs == 0); 1755 // Initialize the local variable appropriately. 1756 if (!hasScalarEvaluationKind(Ty)) { 1757 ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer)); 1758 } else { 1759 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType())); 1760 ArgVals.push_back(ValueAndIsPtr(U, HaveValue)); 1761 } 1762 break; 1763 } 1764 } 1765 1766 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 1767 for (int I = Args.size() - 1; I >= 0; --I) 1768 EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(), 1769 I + 1); 1770 } else { 1771 for (unsigned I = 0, E = Args.size(); I != E; ++I) 1772 EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(), 1773 I + 1); 1774 } 1775 } 1776 1777 static void eraseUnusedBitCasts(llvm::Instruction *insn) { 1778 while (insn->use_empty()) { 1779 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn); 1780 if (!bitcast) return; 1781 1782 // This is "safe" because we would have used a ConstantExpr otherwise. 1783 insn = cast<llvm::Instruction>(bitcast->getOperand(0)); 1784 bitcast->eraseFromParent(); 1785 } 1786 } 1787 1788 /// Try to emit a fused autorelease of a return result. 1789 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, 1790 llvm::Value *result) { 1791 // We must be immediately followed the cast. 1792 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock(); 1793 if (BB->empty()) return nullptr; 1794 if (&BB->back() != result) return nullptr; 1795 1796 llvm::Type *resultType = result->getType(); 1797 1798 // result is in a BasicBlock and is therefore an Instruction. 1799 llvm::Instruction *generator = cast<llvm::Instruction>(result); 1800 1801 SmallVector<llvm::Instruction*,4> insnsToKill; 1802 1803 // Look for: 1804 // %generator = bitcast %type1* %generator2 to %type2* 1805 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) { 1806 // We would have emitted this as a constant if the operand weren't 1807 // an Instruction. 1808 generator = cast<llvm::Instruction>(bitcast->getOperand(0)); 1809 1810 // Require the generator to be immediately followed by the cast. 1811 if (generator->getNextNode() != bitcast) 1812 return nullptr; 1813 1814 insnsToKill.push_back(bitcast); 1815 } 1816 1817 // Look for: 1818 // %generator = call i8* @objc_retain(i8* %originalResult) 1819 // or 1820 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult) 1821 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator); 1822 if (!call) return nullptr; 1823 1824 bool doRetainAutorelease; 1825 1826 if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) { 1827 doRetainAutorelease = true; 1828 } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints() 1829 .objc_retainAutoreleasedReturnValue) { 1830 doRetainAutorelease = false; 1831 1832 // If we emitted an assembly marker for this call (and the 1833 // ARCEntrypoints field should have been set if so), go looking 1834 // for that call. If we can't find it, we can't do this 1835 // optimization. But it should always be the immediately previous 1836 // instruction, unless we needed bitcasts around the call. 1837 if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) { 1838 llvm::Instruction *prev = call->getPrevNode(); 1839 assert(prev); 1840 if (isa<llvm::BitCastInst>(prev)) { 1841 prev = prev->getPrevNode(); 1842 assert(prev); 1843 } 1844 assert(isa<llvm::CallInst>(prev)); 1845 assert(cast<llvm::CallInst>(prev)->getCalledValue() == 1846 CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker); 1847 insnsToKill.push_back(prev); 1848 } 1849 } else { 1850 return nullptr; 1851 } 1852 1853 result = call->getArgOperand(0); 1854 insnsToKill.push_back(call); 1855 1856 // Keep killing bitcasts, for sanity. Note that we no longer care 1857 // about precise ordering as long as there's exactly one use. 1858 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 1859 if (!bitcast->hasOneUse()) break; 1860 insnsToKill.push_back(bitcast); 1861 result = bitcast->getOperand(0); 1862 } 1863 1864 // Delete all the unnecessary instructions, from latest to earliest. 1865 for (SmallVectorImpl<llvm::Instruction*>::iterator 1866 i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i) 1867 (*i)->eraseFromParent(); 1868 1869 // Do the fused retain/autorelease if we were asked to. 1870 if (doRetainAutorelease) 1871 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 1872 1873 // Cast back to the result type. 1874 return CGF.Builder.CreateBitCast(result, resultType); 1875 } 1876 1877 /// If this is a +1 of the value of an immutable 'self', remove it. 1878 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 1879 llvm::Value *result) { 1880 // This is only applicable to a method with an immutable 'self'. 1881 const ObjCMethodDecl *method = 1882 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 1883 if (!method) return nullptr; 1884 const VarDecl *self = method->getSelfDecl(); 1885 if (!self->getType().isConstQualified()) return nullptr; 1886 1887 // Look for a retain call. 1888 llvm::CallInst *retainCall = 1889 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 1890 if (!retainCall || 1891 retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain) 1892 return nullptr; 1893 1894 // Look for an ordinary load of 'self'. 1895 llvm::Value *retainedValue = retainCall->getArgOperand(0); 1896 llvm::LoadInst *load = 1897 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 1898 if (!load || load->isAtomic() || load->isVolatile() || 1899 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self)) 1900 return nullptr; 1901 1902 // Okay! Burn it all down. This relies for correctness on the 1903 // assumption that the retain is emitted as part of the return and 1904 // that thereafter everything is used "linearly". 1905 llvm::Type *resultType = result->getType(); 1906 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 1907 assert(retainCall->use_empty()); 1908 retainCall->eraseFromParent(); 1909 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 1910 1911 return CGF.Builder.CreateBitCast(load, resultType); 1912 } 1913 1914 /// Emit an ARC autorelease of the result of a function. 1915 /// 1916 /// \return the value to actually return from the function 1917 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 1918 llvm::Value *result) { 1919 // If we're returning 'self', kill the initial retain. This is a 1920 // heuristic attempt to "encourage correctness" in the really unfortunate 1921 // case where we have a return of self during a dealloc and we desperately 1922 // need to avoid the possible autorelease. 1923 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result)) 1924 return self; 1925 1926 // At -O0, try to emit a fused retain/autorelease. 1927 if (CGF.shouldUseFusedARCCalls()) 1928 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result)) 1929 return fused; 1930 1931 return CGF.EmitARCAutoreleaseReturnValue(result); 1932 } 1933 1934 /// Heuristically search for a dominating store to the return-value slot. 1935 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { 1936 // If there are multiple uses of the return-value slot, just check 1937 // for something immediately preceding the IP. Sometimes this can 1938 // happen with how we generate implicit-returns; it can also happen 1939 // with noreturn cleanups. 1940 if (!CGF.ReturnValue->hasOneUse()) { 1941 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); 1942 if (IP->empty()) return nullptr; 1943 llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back()); 1944 if (!store) return nullptr; 1945 if (store->getPointerOperand() != CGF.ReturnValue) return nullptr; 1946 assert(!store->isAtomic() && !store->isVolatile()); // see below 1947 return store; 1948 } 1949 1950 llvm::StoreInst *store = 1951 dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back()); 1952 if (!store) return nullptr; 1953 1954 // These aren't actually possible for non-coerced returns, and we 1955 // only care about non-coerced returns on this code path. 1956 assert(!store->isAtomic() && !store->isVolatile()); 1957 1958 // Now do a first-and-dirty dominance check: just walk up the 1959 // single-predecessors chain from the current insertion point. 1960 llvm::BasicBlock *StoreBB = store->getParent(); 1961 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); 1962 while (IP != StoreBB) { 1963 if (!(IP = IP->getSinglePredecessor())) 1964 return nullptr; 1965 } 1966 1967 // Okay, the store's basic block dominates the insertion point; we 1968 // can do our thing. 1969 return store; 1970 } 1971 1972 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI, 1973 bool EmitRetDbgLoc, 1974 SourceLocation EndLoc) { 1975 // Functions with no result always return void. 1976 if (!ReturnValue) { 1977 Builder.CreateRetVoid(); 1978 return; 1979 } 1980 1981 llvm::DebugLoc RetDbgLoc; 1982 llvm::Value *RV = nullptr; 1983 QualType RetTy = FI.getReturnType(); 1984 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1985 1986 switch (RetAI.getKind()) { 1987 case ABIArgInfo::InAlloca: 1988 // Aggregrates get evaluated directly into the destination. Sometimes we 1989 // need to return the sret value in a register, though. 1990 assert(hasAggregateEvaluationKind(RetTy)); 1991 if (RetAI.getInAllocaSRet()) { 1992 llvm::Function::arg_iterator EI = CurFn->arg_end(); 1993 --EI; 1994 llvm::Value *ArgStruct = EI; 1995 llvm::Value *SRet = 1996 Builder.CreateStructGEP(ArgStruct, RetAI.getInAllocaFieldIndex()); 1997 RV = Builder.CreateLoad(SRet, "sret"); 1998 } 1999 break; 2000 2001 case ABIArgInfo::Indirect: { 2002 auto AI = CurFn->arg_begin(); 2003 if (RetAI.isSRetAfterThis()) 2004 ++AI; 2005 switch (getEvaluationKind(RetTy)) { 2006 case TEK_Complex: { 2007 ComplexPairTy RT = 2008 EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy), 2009 EndLoc); 2010 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(AI, RetTy), 2011 /*isInit*/ true); 2012 break; 2013 } 2014 case TEK_Aggregate: 2015 // Do nothing; aggregrates get evaluated directly into the destination. 2016 break; 2017 case TEK_Scalar: 2018 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), 2019 MakeNaturalAlignAddrLValue(AI, RetTy), 2020 /*isInit*/ true); 2021 break; 2022 } 2023 break; 2024 } 2025 2026 case ABIArgInfo::Extend: 2027 case ABIArgInfo::Direct: 2028 if (RetAI.getCoerceToType() == ConvertType(RetTy) && 2029 RetAI.getDirectOffset() == 0) { 2030 // The internal return value temp always will have pointer-to-return-type 2031 // type, just do a load. 2032 2033 // If there is a dominating store to ReturnValue, we can elide 2034 // the load, zap the store, and usually zap the alloca. 2035 if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) { 2036 // Reuse the debug location from the store unless there is 2037 // cleanup code to be emitted between the store and return 2038 // instruction. 2039 if (EmitRetDbgLoc && !AutoreleaseResult) 2040 RetDbgLoc = SI->getDebugLoc(); 2041 // Get the stored value and nuke the now-dead store. 2042 RV = SI->getValueOperand(); 2043 SI->eraseFromParent(); 2044 2045 // If that was the only use of the return value, nuke it as well now. 2046 if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) { 2047 cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent(); 2048 ReturnValue = nullptr; 2049 } 2050 2051 // Otherwise, we have to do a simple load. 2052 } else { 2053 RV = Builder.CreateLoad(ReturnValue); 2054 } 2055 } else { 2056 llvm::Value *V = ReturnValue; 2057 // If the value is offset in memory, apply the offset now. 2058 if (unsigned Offs = RetAI.getDirectOffset()) { 2059 V = Builder.CreateBitCast(V, Builder.getInt8PtrTy()); 2060 V = Builder.CreateConstGEP1_32(V, Offs); 2061 V = Builder.CreateBitCast(V, 2062 llvm::PointerType::getUnqual(RetAI.getCoerceToType())); 2063 } 2064 2065 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this); 2066 } 2067 2068 // In ARC, end functions that return a retainable type with a call 2069 // to objc_autoreleaseReturnValue. 2070 if (AutoreleaseResult) { 2071 assert(getLangOpts().ObjCAutoRefCount && 2072 !FI.isReturnsRetained() && 2073 RetTy->isObjCRetainableType()); 2074 RV = emitAutoreleaseOfResult(*this, RV); 2075 } 2076 2077 break; 2078 2079 case ABIArgInfo::Ignore: 2080 break; 2081 2082 case ABIArgInfo::Expand: 2083 llvm_unreachable("Invalid ABI kind for return argument"); 2084 } 2085 2086 llvm::Instruction *Ret; 2087 if (RV) { 2088 if (SanOpts->ReturnsNonnullAttribute && 2089 CurGD.getDecl()->hasAttr<ReturnsNonNullAttr>()) { 2090 SanitizerScope SanScope(this); 2091 llvm::Value *Cond = 2092 Builder.CreateICmpNE(RV, llvm::Constant::getNullValue(RV->getType())); 2093 llvm::Constant *StaticData[] = { 2094 EmitCheckSourceLocation(EndLoc) 2095 }; 2096 EmitCheck(Cond, "nonnull_return", StaticData, ArrayRef<llvm::Value *>(), 2097 CRK_Recoverable); 2098 } 2099 Ret = Builder.CreateRet(RV); 2100 } else { 2101 Ret = Builder.CreateRetVoid(); 2102 } 2103 2104 if (!RetDbgLoc.isUnknown()) 2105 Ret->setDebugLoc(RetDbgLoc); 2106 } 2107 2108 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 2109 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 2110 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 2111 } 2112 2113 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) { 2114 // FIXME: Generate IR in one pass, rather than going back and fixing up these 2115 // placeholders. 2116 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 2117 llvm::Value *Placeholder = 2118 llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo()); 2119 Placeholder = CGF.Builder.CreateLoad(Placeholder); 2120 return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(), 2121 Ty.getQualifiers(), 2122 AggValueSlot::IsNotDestructed, 2123 AggValueSlot::DoesNotNeedGCBarriers, 2124 AggValueSlot::IsNotAliased); 2125 } 2126 2127 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 2128 const VarDecl *param, 2129 SourceLocation loc) { 2130 // StartFunction converted the ABI-lowered parameter(s) into a 2131 // local alloca. We need to turn that into an r-value suitable 2132 // for EmitCall. 2133 llvm::Value *local = GetAddrOfLocalVar(param); 2134 2135 QualType type = param->getType(); 2136 2137 // For the most part, we just need to load the alloca, except: 2138 // 1) aggregate r-values are actually pointers to temporaries, and 2139 // 2) references to non-scalars are pointers directly to the aggregate. 2140 // I don't know why references to scalars are different here. 2141 if (const ReferenceType *ref = type->getAs<ReferenceType>()) { 2142 if (!hasScalarEvaluationKind(ref->getPointeeType())) 2143 return args.add(RValue::getAggregate(local), type); 2144 2145 // Locals which are references to scalars are represented 2146 // with allocas holding the pointer. 2147 return args.add(RValue::get(Builder.CreateLoad(local)), type); 2148 } 2149 2150 assert(!isInAllocaArgument(CGM.getCXXABI(), type) && 2151 "cannot emit delegate call arguments for inalloca arguments!"); 2152 2153 args.add(convertTempToRValue(local, type, loc), type); 2154 } 2155 2156 static bool isProvablyNull(llvm::Value *addr) { 2157 return isa<llvm::ConstantPointerNull>(addr); 2158 } 2159 2160 static bool isProvablyNonNull(llvm::Value *addr) { 2161 return isa<llvm::AllocaInst>(addr); 2162 } 2163 2164 /// Emit the actual writing-back of a writeback. 2165 static void emitWriteback(CodeGenFunction &CGF, 2166 const CallArgList::Writeback &writeback) { 2167 const LValue &srcLV = writeback.Source; 2168 llvm::Value *srcAddr = srcLV.getAddress(); 2169 assert(!isProvablyNull(srcAddr) && 2170 "shouldn't have writeback for provably null argument"); 2171 2172 llvm::BasicBlock *contBB = nullptr; 2173 2174 // If the argument wasn't provably non-null, we need to null check 2175 // before doing the store. 2176 bool provablyNonNull = isProvablyNonNull(srcAddr); 2177 if (!provablyNonNull) { 2178 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 2179 contBB = CGF.createBasicBlock("icr.done"); 2180 2181 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull"); 2182 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 2183 CGF.EmitBlock(writebackBB); 2184 } 2185 2186 // Load the value to writeback. 2187 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 2188 2189 // Cast it back, in case we're writing an id to a Foo* or something. 2190 value = CGF.Builder.CreateBitCast(value, 2191 cast<llvm::PointerType>(srcAddr->getType())->getElementType(), 2192 "icr.writeback-cast"); 2193 2194 // Perform the writeback. 2195 2196 // If we have a "to use" value, it's something we need to emit a use 2197 // of. This has to be carefully threaded in: if it's done after the 2198 // release it's potentially undefined behavior (and the optimizer 2199 // will ignore it), and if it happens before the retain then the 2200 // optimizer could move the release there. 2201 if (writeback.ToUse) { 2202 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 2203 2204 // Retain the new value. No need to block-copy here: the block's 2205 // being passed up the stack. 2206 value = CGF.EmitARCRetainNonBlock(value); 2207 2208 // Emit the intrinsic use here. 2209 CGF.EmitARCIntrinsicUse(writeback.ToUse); 2210 2211 // Load the old value (primitively). 2212 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 2213 2214 // Put the new value in place (primitively). 2215 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 2216 2217 // Release the old value. 2218 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 2219 2220 // Otherwise, we can just do a normal lvalue store. 2221 } else { 2222 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 2223 } 2224 2225 // Jump to the continuation block. 2226 if (!provablyNonNull) 2227 CGF.EmitBlock(contBB); 2228 } 2229 2230 static void emitWritebacks(CodeGenFunction &CGF, 2231 const CallArgList &args) { 2232 for (const auto &I : args.writebacks()) 2233 emitWriteback(CGF, I); 2234 } 2235 2236 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, 2237 const CallArgList &CallArgs) { 2238 assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()); 2239 ArrayRef<CallArgList::CallArgCleanup> Cleanups = 2240 CallArgs.getCleanupsToDeactivate(); 2241 // Iterate in reverse to increase the likelihood of popping the cleanup. 2242 for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator 2243 I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) { 2244 CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP); 2245 I->IsActiveIP->eraseFromParent(); 2246 } 2247 } 2248 2249 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { 2250 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) 2251 if (uop->getOpcode() == UO_AddrOf) 2252 return uop->getSubExpr(); 2253 return nullptr; 2254 } 2255 2256 /// Emit an argument that's being passed call-by-writeback. That is, 2257 /// we are passing the address of 2258 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, 2259 const ObjCIndirectCopyRestoreExpr *CRE) { 2260 LValue srcLV; 2261 2262 // Make an optimistic effort to emit the address as an l-value. 2263 // This can fail if the the argument expression is more complicated. 2264 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { 2265 srcLV = CGF.EmitLValue(lvExpr); 2266 2267 // Otherwise, just emit it as a scalar. 2268 } else { 2269 llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr()); 2270 2271 QualType srcAddrType = 2272 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); 2273 srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType); 2274 } 2275 llvm::Value *srcAddr = srcLV.getAddress(); 2276 2277 // The dest and src types don't necessarily match in LLVM terms 2278 // because of the crazy ObjC compatibility rules. 2279 2280 llvm::PointerType *destType = 2281 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); 2282 2283 // If the address is a constant null, just pass the appropriate null. 2284 if (isProvablyNull(srcAddr)) { 2285 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), 2286 CRE->getType()); 2287 return; 2288 } 2289 2290 // Create the temporary. 2291 llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(), 2292 "icr.temp"); 2293 // Loading an l-value can introduce a cleanup if the l-value is __weak, 2294 // and that cleanup will be conditional if we can't prove that the l-value 2295 // isn't null, so we need to register a dominating point so that the cleanups 2296 // system will make valid IR. 2297 CodeGenFunction::ConditionalEvaluation condEval(CGF); 2298 2299 // Zero-initialize it if we're not doing a copy-initialization. 2300 bool shouldCopy = CRE->shouldCopy(); 2301 if (!shouldCopy) { 2302 llvm::Value *null = 2303 llvm::ConstantPointerNull::get( 2304 cast<llvm::PointerType>(destType->getElementType())); 2305 CGF.Builder.CreateStore(null, temp); 2306 } 2307 2308 llvm::BasicBlock *contBB = nullptr; 2309 llvm::BasicBlock *originBB = nullptr; 2310 2311 // If the address is *not* known to be non-null, we need to switch. 2312 llvm::Value *finalArgument; 2313 2314 bool provablyNonNull = isProvablyNonNull(srcAddr); 2315 if (provablyNonNull) { 2316 finalArgument = temp; 2317 } else { 2318 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull"); 2319 2320 finalArgument = CGF.Builder.CreateSelect(isNull, 2321 llvm::ConstantPointerNull::get(destType), 2322 temp, "icr.argument"); 2323 2324 // If we need to copy, then the load has to be conditional, which 2325 // means we need control flow. 2326 if (shouldCopy) { 2327 originBB = CGF.Builder.GetInsertBlock(); 2328 contBB = CGF.createBasicBlock("icr.cont"); 2329 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 2330 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 2331 CGF.EmitBlock(copyBB); 2332 condEval.begin(CGF); 2333 } 2334 } 2335 2336 llvm::Value *valueToUse = nullptr; 2337 2338 // Perform a copy if necessary. 2339 if (shouldCopy) { 2340 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 2341 assert(srcRV.isScalar()); 2342 2343 llvm::Value *src = srcRV.getScalarVal(); 2344 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 2345 "icr.cast"); 2346 2347 // Use an ordinary store, not a store-to-lvalue. 2348 CGF.Builder.CreateStore(src, temp); 2349 2350 // If optimization is enabled, and the value was held in a 2351 // __strong variable, we need to tell the optimizer that this 2352 // value has to stay alive until we're doing the store back. 2353 // This is because the temporary is effectively unretained, 2354 // and so otherwise we can violate the high-level semantics. 2355 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 2356 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 2357 valueToUse = src; 2358 } 2359 } 2360 2361 // Finish the control flow if we needed it. 2362 if (shouldCopy && !provablyNonNull) { 2363 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 2364 CGF.EmitBlock(contBB); 2365 2366 // Make a phi for the value to intrinsically use. 2367 if (valueToUse) { 2368 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 2369 "icr.to-use"); 2370 phiToUse->addIncoming(valueToUse, copyBB); 2371 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 2372 originBB); 2373 valueToUse = phiToUse; 2374 } 2375 2376 condEval.end(CGF); 2377 } 2378 2379 args.addWriteback(srcLV, temp, valueToUse); 2380 args.add(RValue::get(finalArgument), CRE->getType()); 2381 } 2382 2383 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 2384 assert(!StackBase && !StackCleanup.isValid()); 2385 2386 // Save the stack. 2387 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 2388 StackBase = CGF.Builder.CreateCall(F, "inalloca.save"); 2389 2390 // Control gets really tied up in landing pads, so we have to spill the 2391 // stacksave to an alloca to avoid violating SSA form. 2392 // TODO: This is dead if we never emit the cleanup. We should create the 2393 // alloca and store lazily on the first cleanup emission. 2394 StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem"); 2395 CGF.Builder.CreateStore(StackBase, StackBaseMem); 2396 CGF.pushStackRestore(EHCleanup, StackBaseMem); 2397 StackCleanup = CGF.EHStack.getInnermostEHScope(); 2398 assert(StackCleanup.isValid()); 2399 } 2400 2401 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 2402 if (StackBase) { 2403 CGF.DeactivateCleanupBlock(StackCleanup, StackBase); 2404 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 2405 // We could load StackBase from StackBaseMem, but in the non-exceptional 2406 // case we can skip it. 2407 CGF.Builder.CreateCall(F, StackBase); 2408 } 2409 } 2410 2411 void CodeGenFunction::EmitCallArgs(CallArgList &Args, 2412 ArrayRef<QualType> ArgTypes, 2413 CallExpr::const_arg_iterator ArgBeg, 2414 CallExpr::const_arg_iterator ArgEnd, 2415 bool ForceColumnInfo) { 2416 CGDebugInfo *DI = getDebugInfo(); 2417 SourceLocation CallLoc; 2418 if (DI) CallLoc = DI->getLocation(); 2419 2420 // We *have* to evaluate arguments from right to left in the MS C++ ABI, 2421 // because arguments are destroyed left to right in the callee. 2422 if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 2423 // Insert a stack save if we're going to need any inalloca args. 2424 bool HasInAllocaArgs = false; 2425 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end(); 2426 I != E && !HasInAllocaArgs; ++I) 2427 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I); 2428 if (HasInAllocaArgs) { 2429 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 2430 Args.allocateArgumentMemory(*this); 2431 } 2432 2433 // Evaluate each argument. 2434 size_t CallArgsStart = Args.size(); 2435 for (int I = ArgTypes.size() - 1; I >= 0; --I) { 2436 CallExpr::const_arg_iterator Arg = ArgBeg + I; 2437 EmitCallArg(Args, *Arg, ArgTypes[I]); 2438 // Restore the debug location. 2439 if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo); 2440 } 2441 2442 // Un-reverse the arguments we just evaluated so they match up with the LLVM 2443 // IR function. 2444 std::reverse(Args.begin() + CallArgsStart, Args.end()); 2445 return; 2446 } 2447 2448 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { 2449 CallExpr::const_arg_iterator Arg = ArgBeg + I; 2450 assert(Arg != ArgEnd); 2451 EmitCallArg(Args, *Arg, ArgTypes[I]); 2452 // Restore the debug location. 2453 if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo); 2454 } 2455 } 2456 2457 namespace { 2458 2459 struct DestroyUnpassedArg : EHScopeStack::Cleanup { 2460 DestroyUnpassedArg(llvm::Value *Addr, QualType Ty) 2461 : Addr(Addr), Ty(Ty) {} 2462 2463 llvm::Value *Addr; 2464 QualType Ty; 2465 2466 void Emit(CodeGenFunction &CGF, Flags flags) override { 2467 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 2468 assert(!Dtor->isTrivial()); 2469 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 2470 /*Delegating=*/false, Addr); 2471 } 2472 }; 2473 2474 } 2475 2476 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 2477 QualType type) { 2478 if (const ObjCIndirectCopyRestoreExpr *CRE 2479 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 2480 assert(getLangOpts().ObjCAutoRefCount); 2481 assert(getContext().hasSameType(E->getType(), type)); 2482 return emitWritebackArg(*this, args, CRE); 2483 } 2484 2485 assert(type->isReferenceType() == E->isGLValue() && 2486 "reference binding to unmaterialized r-value!"); 2487 2488 if (E->isGLValue()) { 2489 assert(E->getObjectKind() == OK_Ordinary); 2490 return args.add(EmitReferenceBindingToExpr(E), type); 2491 } 2492 2493 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 2494 2495 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 2496 // However, we still have to push an EH-only cleanup in case we unwind before 2497 // we make it to the call. 2498 if (HasAggregateEvalKind && 2499 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 2500 // If we're using inalloca, use the argument memory. Otherwise, use a 2501 // temporary. 2502 AggValueSlot Slot; 2503 if (args.isUsingInAlloca()) 2504 Slot = createPlaceholderSlot(*this, type); 2505 else 2506 Slot = CreateAggTemp(type, "agg.tmp"); 2507 2508 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 2509 bool DestroyedInCallee = 2510 RD && RD->hasNonTrivialDestructor() && 2511 CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default; 2512 if (DestroyedInCallee) 2513 Slot.setExternallyDestructed(); 2514 2515 EmitAggExpr(E, Slot); 2516 RValue RV = Slot.asRValue(); 2517 args.add(RV, type); 2518 2519 if (DestroyedInCallee) { 2520 // Create a no-op GEP between the placeholder and the cleanup so we can 2521 // RAUW it successfully. It also serves as a marker of the first 2522 // instruction where the cleanup is active. 2523 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type); 2524 // This unreachable is a temporary marker which will be removed later. 2525 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 2526 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 2527 } 2528 return; 2529 } 2530 2531 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 2532 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 2533 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 2534 assert(L.isSimple()); 2535 if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) { 2536 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true); 2537 } else { 2538 // We can't represent a misaligned lvalue in the CallArgList, so copy 2539 // to an aligned temporary now. 2540 llvm::Value *tmp = CreateMemTemp(type); 2541 EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(), 2542 L.getAlignment()); 2543 args.add(RValue::getAggregate(tmp), type); 2544 } 2545 return; 2546 } 2547 2548 args.add(EmitAnyExprToTemp(E), type); 2549 } 2550 2551 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 2552 // optimizer it can aggressively ignore unwind edges. 2553 void 2554 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 2555 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 2556 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 2557 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 2558 CGM.getNoObjCARCExceptionsMetadata()); 2559 } 2560 2561 /// Emits a call to the given no-arguments nounwind runtime function. 2562 llvm::CallInst * 2563 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 2564 const llvm::Twine &name) { 2565 return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value*>(), name); 2566 } 2567 2568 /// Emits a call to the given nounwind runtime function. 2569 llvm::CallInst * 2570 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 2571 ArrayRef<llvm::Value*> args, 2572 const llvm::Twine &name) { 2573 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 2574 call->setDoesNotThrow(); 2575 return call; 2576 } 2577 2578 /// Emits a simple call (never an invoke) to the given no-arguments 2579 /// runtime function. 2580 llvm::CallInst * 2581 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 2582 const llvm::Twine &name) { 2583 return EmitRuntimeCall(callee, ArrayRef<llvm::Value*>(), name); 2584 } 2585 2586 /// Emits a simple call (never an invoke) to the given runtime 2587 /// function. 2588 llvm::CallInst * 2589 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 2590 ArrayRef<llvm::Value*> args, 2591 const llvm::Twine &name) { 2592 llvm::CallInst *call = Builder.CreateCall(callee, args, name); 2593 call->setCallingConv(getRuntimeCC()); 2594 return call; 2595 } 2596 2597 /// Emits a call or invoke to the given noreturn runtime function. 2598 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, 2599 ArrayRef<llvm::Value*> args) { 2600 if (getInvokeDest()) { 2601 llvm::InvokeInst *invoke = 2602 Builder.CreateInvoke(callee, 2603 getUnreachableBlock(), 2604 getInvokeDest(), 2605 args); 2606 invoke->setDoesNotReturn(); 2607 invoke->setCallingConv(getRuntimeCC()); 2608 } else { 2609 llvm::CallInst *call = Builder.CreateCall(callee, args); 2610 call->setDoesNotReturn(); 2611 call->setCallingConv(getRuntimeCC()); 2612 Builder.CreateUnreachable(); 2613 } 2614 PGO.setCurrentRegionUnreachable(); 2615 } 2616 2617 /// Emits a call or invoke instruction to the given nullary runtime 2618 /// function. 2619 llvm::CallSite 2620 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 2621 const Twine &name) { 2622 return EmitRuntimeCallOrInvoke(callee, ArrayRef<llvm::Value*>(), name); 2623 } 2624 2625 /// Emits a call or invoke instruction to the given runtime function. 2626 llvm::CallSite 2627 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 2628 ArrayRef<llvm::Value*> args, 2629 const Twine &name) { 2630 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name); 2631 callSite.setCallingConv(getRuntimeCC()); 2632 return callSite; 2633 } 2634 2635 llvm::CallSite 2636 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 2637 const Twine &Name) { 2638 return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name); 2639 } 2640 2641 /// Emits a call or invoke instruction to the given function, depending 2642 /// on the current state of the EH stack. 2643 llvm::CallSite 2644 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 2645 ArrayRef<llvm::Value *> Args, 2646 const Twine &Name) { 2647 llvm::BasicBlock *InvokeDest = getInvokeDest(); 2648 2649 llvm::Instruction *Inst; 2650 if (!InvokeDest) 2651 Inst = Builder.CreateCall(Callee, Args, Name); 2652 else { 2653 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 2654 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name); 2655 EmitBlock(ContBB); 2656 } 2657 2658 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 2659 // optimizer it can aggressively ignore unwind edges. 2660 if (CGM.getLangOpts().ObjCAutoRefCount) 2661 AddObjCARCExceptionMetadata(Inst); 2662 2663 return Inst; 2664 } 2665 2666 void CodeGenFunction::ExpandTypeToArgs( 2667 QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy, 2668 SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) { 2669 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 2670 unsigned NumElts = AT->getSize().getZExtValue(); 2671 QualType EltTy = AT->getElementType(); 2672 llvm::Value *Addr = RV.getAggregateAddr(); 2673 for (unsigned Elt = 0; Elt < NumElts; ++Elt) { 2674 llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt); 2675 RValue EltRV = convertTempToRValue(EltAddr, EltTy, SourceLocation()); 2676 ExpandTypeToArgs(EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos); 2677 } 2678 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 2679 RecordDecl *RD = RT->getDecl(); 2680 assert(RV.isAggregate() && "Unexpected rvalue during struct expansion"); 2681 LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty); 2682 2683 if (RD->isUnion()) { 2684 const FieldDecl *LargestFD = nullptr; 2685 CharUnits UnionSize = CharUnits::Zero(); 2686 2687 for (const auto *FD : RD->fields()) { 2688 assert(!FD->isBitField() && 2689 "Cannot expand structure with bit-field members."); 2690 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType()); 2691 if (UnionSize < FieldSize) { 2692 UnionSize = FieldSize; 2693 LargestFD = FD; 2694 } 2695 } 2696 if (LargestFD) { 2697 RValue FldRV = EmitRValueForField(LV, LargestFD, SourceLocation()); 2698 ExpandTypeToArgs(LargestFD->getType(), FldRV, IRFuncTy, IRCallArgs, 2699 IRCallArgPos); 2700 } 2701 } else { 2702 for (const auto *FD : RD->fields()) { 2703 RValue FldRV = EmitRValueForField(LV, FD, SourceLocation()); 2704 ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs, IRCallArgPos); 2705 } 2706 } 2707 } else if (Ty->isAnyComplexType()) { 2708 ComplexPairTy CV = RV.getComplexVal(); 2709 IRCallArgs[IRCallArgPos++] = CV.first; 2710 IRCallArgs[IRCallArgPos++] = CV.second; 2711 } else { 2712 assert(RV.isScalar() && 2713 "Unexpected non-scalar rvalue during struct expansion."); 2714 2715 // Insert a bitcast as needed. 2716 llvm::Value *V = RV.getScalarVal(); 2717 if (IRCallArgPos < IRFuncTy->getNumParams() && 2718 V->getType() != IRFuncTy->getParamType(IRCallArgPos)) 2719 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos)); 2720 2721 IRCallArgs[IRCallArgPos++] = V; 2722 } 2723 } 2724 2725 /// \brief Store a non-aggregate value to an address to initialize it. For 2726 /// initialization, a non-atomic store will be used. 2727 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, 2728 LValue Dst) { 2729 if (Src.isScalar()) 2730 CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true); 2731 else 2732 CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true); 2733 } 2734 2735 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, 2736 llvm::Value *New) { 2737 DeferredReplacements.push_back(std::make_pair(Old, New)); 2738 } 2739 2740 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 2741 llvm::Value *Callee, 2742 ReturnValueSlot ReturnValue, 2743 const CallArgList &CallArgs, 2744 const Decl *TargetDecl, 2745 llvm::Instruction **callOrInvoke) { 2746 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 2747 2748 // Handle struct-return functions by passing a pointer to the 2749 // location that we would like to return into. 2750 QualType RetTy = CallInfo.getReturnType(); 2751 const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); 2752 2753 llvm::FunctionType *IRFuncTy = 2754 cast<llvm::FunctionType>( 2755 cast<llvm::PointerType>(Callee->getType())->getElementType()); 2756 2757 // If we're using inalloca, insert the allocation after the stack save. 2758 // FIXME: Do this earlier rather than hacking it in here! 2759 llvm::Value *ArgMemory = nullptr; 2760 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { 2761 llvm::Instruction *IP = CallArgs.getStackBase(); 2762 llvm::AllocaInst *AI; 2763 if (IP) { 2764 IP = IP->getNextNode(); 2765 AI = new llvm::AllocaInst(ArgStruct, "argmem", IP); 2766 } else { 2767 AI = CreateTempAlloca(ArgStruct, "argmem"); 2768 } 2769 AI->setUsedWithInAlloca(true); 2770 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); 2771 ArgMemory = AI; 2772 } 2773 2774 ClangToLLVMArgMapping IRFunctionArgs(CGM, CallInfo); 2775 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs()); 2776 2777 // If the call returns a temporary with struct return, create a temporary 2778 // alloca to hold the result, unless one is given to us. 2779 llvm::Value *SRetPtr = nullptr; 2780 if (RetAI.isIndirect() || RetAI.isInAlloca()) { 2781 SRetPtr = ReturnValue.getValue(); 2782 if (!SRetPtr) 2783 SRetPtr = CreateMemTemp(RetTy); 2784 if (IRFunctionArgs.hasSRetArg()) { 2785 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr; 2786 } else { 2787 llvm::Value *Addr = 2788 Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex()); 2789 Builder.CreateStore(SRetPtr, Addr); 2790 } 2791 } 2792 2793 assert(CallInfo.arg_size() == CallArgs.size() && 2794 "Mismatch between function signature & arguments."); 2795 unsigned ArgNo = 0; 2796 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); 2797 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); 2798 I != E; ++I, ++info_it, ++ArgNo) { 2799 const ABIArgInfo &ArgInfo = info_it->info; 2800 RValue RV = I->RV; 2801 2802 CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty); 2803 2804 // Insert a padding argument to ensure proper alignment. 2805 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 2806 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 2807 llvm::UndefValue::get(ArgInfo.getPaddingType()); 2808 2809 unsigned FirstIRArg, NumIRArgs; 2810 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2811 2812 switch (ArgInfo.getKind()) { 2813 case ABIArgInfo::InAlloca: { 2814 assert(NumIRArgs == 0); 2815 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 2816 if (RV.isAggregate()) { 2817 // Replace the placeholder with the appropriate argument slot GEP. 2818 llvm::Instruction *Placeholder = 2819 cast<llvm::Instruction>(RV.getAggregateAddr()); 2820 CGBuilderTy::InsertPoint IP = Builder.saveIP(); 2821 Builder.SetInsertPoint(Placeholder); 2822 llvm::Value *Addr = Builder.CreateStructGEP( 2823 ArgMemory, ArgInfo.getInAllocaFieldIndex()); 2824 Builder.restoreIP(IP); 2825 deferPlaceholderReplacement(Placeholder, Addr); 2826 } else { 2827 // Store the RValue into the argument struct. 2828 llvm::Value *Addr = 2829 Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex()); 2830 unsigned AS = Addr->getType()->getPointerAddressSpace(); 2831 llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS); 2832 // There are some cases where a trivial bitcast is not avoidable. The 2833 // definition of a type later in a translation unit may change it's type 2834 // from {}* to (%struct.foo*)*. 2835 if (Addr->getType() != MemType) 2836 Addr = Builder.CreateBitCast(Addr, MemType); 2837 LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign); 2838 EmitInitStoreOfNonAggregate(*this, RV, argLV); 2839 } 2840 break; 2841 } 2842 2843 case ABIArgInfo::Indirect: { 2844 assert(NumIRArgs == 1); 2845 if (RV.isScalar() || RV.isComplex()) { 2846 // Make a temporary alloca to pass the argument. 2847 llvm::AllocaInst *AI = CreateMemTemp(I->Ty); 2848 if (ArgInfo.getIndirectAlign() > AI->getAlignment()) 2849 AI->setAlignment(ArgInfo.getIndirectAlign()); 2850 IRCallArgs[FirstIRArg] = AI; 2851 2852 LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign); 2853 EmitInitStoreOfNonAggregate(*this, RV, argLV); 2854 } else { 2855 // We want to avoid creating an unnecessary temporary+copy here; 2856 // however, we need one in three cases: 2857 // 1. If the argument is not byval, and we are required to copy the 2858 // source. (This case doesn't occur on any common architecture.) 2859 // 2. If the argument is byval, RV is not sufficiently aligned, and 2860 // we cannot force it to be sufficiently aligned. 2861 // 3. If the argument is byval, but RV is located in an address space 2862 // different than that of the argument (0). 2863 llvm::Value *Addr = RV.getAggregateAddr(); 2864 unsigned Align = ArgInfo.getIndirectAlign(); 2865 const llvm::DataLayout *TD = &CGM.getDataLayout(); 2866 const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace(); 2867 const unsigned ArgAddrSpace = 2868 (FirstIRArg < IRFuncTy->getNumParams() 2869 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() 2870 : 0); 2871 if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) || 2872 (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align && 2873 llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) || 2874 (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) { 2875 // Create an aligned temporary, and copy to it. 2876 llvm::AllocaInst *AI = CreateMemTemp(I->Ty); 2877 if (Align > AI->getAlignment()) 2878 AI->setAlignment(Align); 2879 IRCallArgs[FirstIRArg] = AI; 2880 EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified()); 2881 } else { 2882 // Skip the extra memcpy call. 2883 IRCallArgs[FirstIRArg] = Addr; 2884 } 2885 } 2886 break; 2887 } 2888 2889 case ABIArgInfo::Ignore: 2890 assert(NumIRArgs == 0); 2891 break; 2892 2893 case ABIArgInfo::Extend: 2894 case ABIArgInfo::Direct: { 2895 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) && 2896 ArgInfo.getCoerceToType() == ConvertType(info_it->type) && 2897 ArgInfo.getDirectOffset() == 0) { 2898 assert(NumIRArgs == 1); 2899 llvm::Value *V; 2900 if (RV.isScalar()) 2901 V = RV.getScalarVal(); 2902 else 2903 V = Builder.CreateLoad(RV.getAggregateAddr()); 2904 2905 // If the argument doesn't match, perform a bitcast to coerce it. This 2906 // can happen due to trivial type mismatches. 2907 if (FirstIRArg < IRFuncTy->getNumParams() && 2908 V->getType() != IRFuncTy->getParamType(FirstIRArg)) 2909 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); 2910 IRCallArgs[FirstIRArg] = V; 2911 break; 2912 } 2913 2914 // FIXME: Avoid the conversion through memory if possible. 2915 llvm::Value *SrcPtr; 2916 if (RV.isScalar() || RV.isComplex()) { 2917 SrcPtr = CreateMemTemp(I->Ty, "coerce"); 2918 LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign); 2919 EmitInitStoreOfNonAggregate(*this, RV, SrcLV); 2920 } else 2921 SrcPtr = RV.getAggregateAddr(); 2922 2923 // If the value is offset in memory, apply the offset now. 2924 if (unsigned Offs = ArgInfo.getDirectOffset()) { 2925 SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy()); 2926 SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs); 2927 SrcPtr = Builder.CreateBitCast(SrcPtr, 2928 llvm::PointerType::getUnqual(ArgInfo.getCoerceToType())); 2929 2930 } 2931 2932 // If the coerce-to type is a first class aggregate, we flatten it and 2933 // pass the elements. Either way is semantically identical, but fast-isel 2934 // and the optimizer generally likes scalar values better than FCAs. 2935 // We cannot do this for functions using the AAPCS calling convention, 2936 // as structures are treated differently by that calling convention. 2937 llvm::StructType *STy = 2938 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType()); 2939 if (STy && !isAAPCSVFP(CallInfo, getTarget())) { 2940 llvm::Type *SrcTy = 2941 cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); 2942 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); 2943 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); 2944 2945 // If the source type is smaller than the destination type of the 2946 // coerce-to logic, copy the source value into a temp alloca the size 2947 // of the destination type to allow loading all of it. The bits past 2948 // the source value are left undef. 2949 if (SrcSize < DstSize) { 2950 llvm::AllocaInst *TempAlloca 2951 = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce"); 2952 Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0); 2953 SrcPtr = TempAlloca; 2954 } else { 2955 SrcPtr = Builder.CreateBitCast(SrcPtr, 2956 llvm::PointerType::getUnqual(STy)); 2957 } 2958 2959 assert(NumIRArgs == STy->getNumElements()); 2960 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2961 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i); 2962 llvm::LoadInst *LI = Builder.CreateLoad(EltPtr); 2963 // We don't know what we're loading from. 2964 LI->setAlignment(1); 2965 IRCallArgs[FirstIRArg + i] = LI; 2966 } 2967 } else { 2968 // In the simple case, just pass the coerced loaded value. 2969 assert(NumIRArgs == 1); 2970 IRCallArgs[FirstIRArg] = 2971 CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), *this); 2972 } 2973 2974 break; 2975 } 2976 2977 case ABIArgInfo::Expand: 2978 unsigned IRArgPos = FirstIRArg; 2979 ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos); 2980 assert(IRArgPos == FirstIRArg + NumIRArgs); 2981 break; 2982 } 2983 } 2984 2985 if (ArgMemory) { 2986 llvm::Value *Arg = ArgMemory; 2987 if (CallInfo.isVariadic()) { 2988 // When passing non-POD arguments by value to variadic functions, we will 2989 // end up with a variadic prototype and an inalloca call site. In such 2990 // cases, we can't do any parameter mismatch checks. Give up and bitcast 2991 // the callee. 2992 unsigned CalleeAS = 2993 cast<llvm::PointerType>(Callee->getType())->getAddressSpace(); 2994 Callee = Builder.CreateBitCast( 2995 Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS)); 2996 } else { 2997 llvm::Type *LastParamTy = 2998 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1); 2999 if (Arg->getType() != LastParamTy) { 3000 #ifndef NDEBUG 3001 // Assert that these structs have equivalent element types. 3002 llvm::StructType *FullTy = CallInfo.getArgStruct(); 3003 llvm::StructType *DeclaredTy = cast<llvm::StructType>( 3004 cast<llvm::PointerType>(LastParamTy)->getElementType()); 3005 assert(DeclaredTy->getNumElements() == FullTy->getNumElements()); 3006 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(), 3007 DE = DeclaredTy->element_end(), 3008 FI = FullTy->element_begin(); 3009 DI != DE; ++DI, ++FI) 3010 assert(*DI == *FI); 3011 #endif 3012 Arg = Builder.CreateBitCast(Arg, LastParamTy); 3013 } 3014 } 3015 assert(IRFunctionArgs.hasInallocaArg()); 3016 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg; 3017 } 3018 3019 if (!CallArgs.getCleanupsToDeactivate().empty()) 3020 deactivateArgCleanupsBeforeCall(*this, CallArgs); 3021 3022 // If the callee is a bitcast of a function to a varargs pointer to function 3023 // type, check to see if we can remove the bitcast. This handles some cases 3024 // with unprototyped functions. 3025 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee)) 3026 if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) { 3027 llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType()); 3028 llvm::FunctionType *CurFT = 3029 cast<llvm::FunctionType>(CurPT->getElementType()); 3030 llvm::FunctionType *ActualFT = CalleeF->getFunctionType(); 3031 3032 if (CE->getOpcode() == llvm::Instruction::BitCast && 3033 ActualFT->getReturnType() == CurFT->getReturnType() && 3034 ActualFT->getNumParams() == CurFT->getNumParams() && 3035 ActualFT->getNumParams() == IRCallArgs.size() && 3036 (CurFT->isVarArg() || !ActualFT->isVarArg())) { 3037 bool ArgsMatch = true; 3038 for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i) 3039 if (ActualFT->getParamType(i) != CurFT->getParamType(i)) { 3040 ArgsMatch = false; 3041 break; 3042 } 3043 3044 // Strip the cast if we can get away with it. This is a nice cleanup, 3045 // but also allows us to inline the function at -O0 if it is marked 3046 // always_inline. 3047 if (ArgsMatch) 3048 Callee = CalleeF; 3049 } 3050 } 3051 3052 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg()); 3053 for (unsigned i = 0; i < IRCallArgs.size(); ++i) { 3054 // Inalloca argument can have different type. 3055 if (IRFunctionArgs.hasInallocaArg() && 3056 i == IRFunctionArgs.getInallocaArgNo()) 3057 continue; 3058 if (i < IRFuncTy->getNumParams()) 3059 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i)); 3060 } 3061 3062 unsigned CallingConv; 3063 CodeGen::AttributeListType AttributeList; 3064 CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, 3065 CallingConv, true); 3066 llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(), 3067 AttributeList); 3068 3069 llvm::BasicBlock *InvokeDest = nullptr; 3070 if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex, 3071 llvm::Attribute::NoUnwind)) 3072 InvokeDest = getInvokeDest(); 3073 3074 llvm::CallSite CS; 3075 if (!InvokeDest) { 3076 CS = Builder.CreateCall(Callee, IRCallArgs); 3077 } else { 3078 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); 3079 CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs); 3080 EmitBlock(Cont); 3081 } 3082 if (callOrInvoke) 3083 *callOrInvoke = CS.getInstruction(); 3084 3085 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() && 3086 !CS.hasFnAttr(llvm::Attribute::NoInline)) 3087 Attrs = 3088 Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex, 3089 llvm::Attribute::AlwaysInline); 3090 3091 CS.setAttributes(Attrs); 3092 CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); 3093 3094 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3095 // optimizer it can aggressively ignore unwind edges. 3096 if (CGM.getLangOpts().ObjCAutoRefCount) 3097 AddObjCARCExceptionMetadata(CS.getInstruction()); 3098 3099 // If the call doesn't return, finish the basic block and clear the 3100 // insertion point; this allows the rest of IRgen to discard 3101 // unreachable code. 3102 if (CS.doesNotReturn()) { 3103 Builder.CreateUnreachable(); 3104 Builder.ClearInsertionPoint(); 3105 3106 // FIXME: For now, emit a dummy basic block because expr emitters in 3107 // generally are not ready to handle emitting expressions at unreachable 3108 // points. 3109 EnsureInsertPoint(); 3110 3111 // Return a reasonable RValue. 3112 return GetUndefRValue(RetTy); 3113 } 3114 3115 llvm::Instruction *CI = CS.getInstruction(); 3116 if (Builder.isNamePreserving() && !CI->getType()->isVoidTy()) 3117 CI->setName("call"); 3118 3119 // Emit any writebacks immediately. Arguably this should happen 3120 // after any return-value munging. 3121 if (CallArgs.hasWritebacks()) 3122 emitWritebacks(*this, CallArgs); 3123 3124 // The stack cleanup for inalloca arguments has to run out of the normal 3125 // lexical order, so deactivate it and run it manually here. 3126 CallArgs.freeArgumentMemory(*this); 3127 3128 switch (RetAI.getKind()) { 3129 case ABIArgInfo::InAlloca: 3130 case ABIArgInfo::Indirect: 3131 return convertTempToRValue(SRetPtr, RetTy, SourceLocation()); 3132 3133 case ABIArgInfo::Ignore: 3134 // If we are ignoring an argument that had a result, make sure to 3135 // construct the appropriate return value for our caller. 3136 return GetUndefRValue(RetTy); 3137 3138 case ABIArgInfo::Extend: 3139 case ABIArgInfo::Direct: { 3140 llvm::Type *RetIRTy = ConvertType(RetTy); 3141 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) { 3142 switch (getEvaluationKind(RetTy)) { 3143 case TEK_Complex: { 3144 llvm::Value *Real = Builder.CreateExtractValue(CI, 0); 3145 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1); 3146 return RValue::getComplex(std::make_pair(Real, Imag)); 3147 } 3148 case TEK_Aggregate: { 3149 llvm::Value *DestPtr = ReturnValue.getValue(); 3150 bool DestIsVolatile = ReturnValue.isVolatile(); 3151 3152 if (!DestPtr) { 3153 DestPtr = CreateMemTemp(RetTy, "agg.tmp"); 3154 DestIsVolatile = false; 3155 } 3156 BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false); 3157 return RValue::getAggregate(DestPtr); 3158 } 3159 case TEK_Scalar: { 3160 // If the argument doesn't match, perform a bitcast to coerce it. This 3161 // can happen due to trivial type mismatches. 3162 llvm::Value *V = CI; 3163 if (V->getType() != RetIRTy) 3164 V = Builder.CreateBitCast(V, RetIRTy); 3165 return RValue::get(V); 3166 } 3167 } 3168 llvm_unreachable("bad evaluation kind"); 3169 } 3170 3171 llvm::Value *DestPtr = ReturnValue.getValue(); 3172 bool DestIsVolatile = ReturnValue.isVolatile(); 3173 3174 if (!DestPtr) { 3175 DestPtr = CreateMemTemp(RetTy, "coerce"); 3176 DestIsVolatile = false; 3177 } 3178 3179 // If the value is offset in memory, apply the offset now. 3180 llvm::Value *StorePtr = DestPtr; 3181 if (unsigned Offs = RetAI.getDirectOffset()) { 3182 StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy()); 3183 StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs); 3184 StorePtr = Builder.CreateBitCast(StorePtr, 3185 llvm::PointerType::getUnqual(RetAI.getCoerceToType())); 3186 } 3187 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this); 3188 3189 return convertTempToRValue(DestPtr, RetTy, SourceLocation()); 3190 } 3191 3192 case ABIArgInfo::Expand: 3193 llvm_unreachable("Invalid ABI kind for return argument"); 3194 } 3195 3196 llvm_unreachable("Unhandled ABIArgInfo::Kind"); 3197 } 3198 3199 /* VarArg handling */ 3200 3201 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) { 3202 return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this); 3203 } 3204