//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: add support for CC_X86Pascal to llvm
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
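  //
  // Illustrative example (hypothetical declaration, not from this file):
  // for a K&R-style `int f();` there is no parameter list to lower, so the
  // type is arranged as variadic with RequiredArgs(0): every argument at a
  // call site such as f(1, 2) is treated as a trailing variadic argument.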
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 false, None, FTNP->getExtInfo(),
                                 RequiredArgs(0));
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored. Use the
/// given ExtInfo instead of the ExtInfo from the function type.
static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
                                       bool IsInstanceMethod,
                                       SmallVectorImpl<CanQualType> &prefix,
                                       CanQual<FunctionProtoType> FTP,
                                       FunctionType::ExtInfo extInfo) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
    prefix.push_back(FTP->getParamType(i));
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, IsInstanceMethod, prefix,
                                     extInfo, required);
}

/// Arrange the argument and result information for a free function (i.e.
/// not a C++ or ObjC instance method) of the given type.
static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                      CanQual<FunctionProtoType> FTP) {
  return arrangeLLVMFunctionInfo(CGT, false, prefix, FTP, FTP->getExtInfo());
}

/// Arrange the argument and result information for a C++ method of the
/// given type, on top of the 'this' parameter already stored in the prefix.
static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                      CanQual<FunctionProtoType> FTP) {
  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  return arrangeLLVMFunctionInfo(CGT, true, prefix, FTP, extInfo);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeFreeFunctionType(*this, argTypes, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<PnaclCallAttr>())
    return CC_PnaclCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  return CC_C;
}
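
// Illustrative example (hypothetical declaration, not from this file): for
//   void g(void) __attribute__((stdcall));
// getCallingConventionForDecl returns CC_X86StdCall, which
// ClangCallConvToLLVMCallConv above maps to llvm::CallingConv::X86_StdCall.
// Note that the MSABI/SysVABI cases return CC_C when the attribute merely
// re-requests the platform's default convention.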

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeCXXMethodType(*this, argTypes,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

/// Arrange the argument and result information for a declaration
/// or definition of the given constructor variant.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
                                               CXXCtorType ctorKind) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));

  GlobalDecl GD(D, ctorKind);
  CanQualType resultType =
      TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
    argTypes.push_back(FTP->getParamType(i));

  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);

  RequiredArgs required =
      (D->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo, required);
}

/// Arrange a call to a C++ constructor, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end(); i != e;
       ++i)
    ArgTypes.push_back(Context.getCanonicalParamType(i->Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType =
      TheCXXABI.HasThisReturn(GD) ? ArgTypes.front() : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(ResultType, true, ArgTypes, Info, Required);
}
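
// Illustrative example (hypothetical class, not from this file): for
//   struct S { S(int); };
// an ABI where HasThisReturn() is true for constructors (e.g. the ARM C++
// ABI) arranges the complete-object constructor roughly as
//   %struct.S* @_ZN1SC1Ei(%struct.S* %this, i32)
// whereas the Itanium x86-64 ABI arranges it as
//   void @_ZN1SC1Ei(%struct.S* %this, i32)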

/// Arrange the argument and result information for a declaration,
/// definition, or call to the given destructor variant. It so
/// happens that all three cases produce the same information.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
                                   CXXDtorType dtorKind) {
  SmallVector<CanQualType, 2> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));

  GlobalDecl GD(D, dtorKind);
  CanQualType resultType =
      TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumParams() == 0 && "dtor with formal parameters");
  assert(!FTP->isVariadic() && "dtor with variadic prototype");

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo,
                                 RequiredArgs::All);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(noProto->getReturnType(), false, None,
                                   noProto->getExtInfo(), RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()), false,
                                 argTys, einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXDestructor(DD, GD.getDtorType());

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a call as if to a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
               .isNoProtoCallVariadic(args,
                                      cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  return CGT.arrangeFreeFunctionCall(fnType->getReturnType(), args,
                                     fnType->getExtInfo(), required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 0);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes,
                                 info, required);
}
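
// Illustrative example for the free-function-call arrangement above
// (hypothetical call, not from this file): arranging
//   printf("%d\n", 42);
// against the variadic prototype `int printf(const char *, ...)` produces
// RequiredArgs(1): only the format string is required by the prototype, and
// everything after it is passed under the target's variadic rules.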

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getReturnType()), true,
                                 argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
    QualType resultType, const FunctionArgList &args,
    const FunctionType::ExtInfo &info, bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));

  RequiredArgs required =
      (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes,
                                 info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(getContext().VoidTy, false, None,
                                 FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool IsInstanceMethod,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
#ifndef NDEBUG
  for (ArrayRef<CanQualType>::const_iterator
         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Look up or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, IsInstanceMethod, info, required, resultType,
                          argTypes);

  void *insertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, IsInstanceMethod, info, resultType, argTypes,
                              required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertType(I->type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool IsInstanceMethod,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = IsInstanceMethod;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = 0;
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/
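
// Illustrative example for the expansion below (hypothetical type, not from
// this file): expanding
//   struct P { int x; float y[2]; };
// yields the flattened scalar sequence { i32, float, float }; a _Complex
// double contributes two doubles, and a union contributes only its largest
// field, as the union branch below explains.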

void CodeGenTypes::GetExpandedTypes(QualType type,
                     SmallVectorImpl<llvm::Type*> &expandedTypes) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
    uint64_t NumElts = AT->getSize().getZExtValue();
    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
      GetExpandedTypes(AT->getElementType(), expandedTypes);
  } else if (const RecordType *RT = type->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can occur here only in degenerate cases - all the fields are
      // the same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        GetExpandedTypes(LargestFD->getType(), expandedTypes);
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        assert(!i->isBitField() &&
               "Cannot expand structure with bit-field members.");
        GetExpandedTypes(i->getType(), expandedTypes);
      }
    }
  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CT->getElementType());
    expandedTypes.push_back(EltTy);
    expandedTypes.push_back(EltTy);
  } else
    expandedTypes.push_back(ConvertType(type));
}

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      AI = ExpandTypeFromArgs(EltTy, LV, AI);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    if (RD->isUnion()) {
      // Unions can occur here only in degenerate cases - all the fields are
      // the same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, LargestFD);
        AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        FieldDecl *FD = *i;
        QualType FT = FD->getType();

        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, FD);
        AI = ExpandTypeFromArgs(FT, SubLV, AI);
      }
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType EltTy = CT->getElementType();
    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  } else {
    EmitStoreThroughLValue(RValue::get(AI), LV);
    ++AI;
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
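      //
      // Illustrative example (hypothetical values, not from this file):
      // coercing an i64 holding 0xAABBCCDD11223344 down to i32 keeps
      // 0xAABBCCDD on a big-endian target (shift right by 32, then
      // truncate), but 0x11223344 on a little-endian one - exactly the
      // bytes a 4-byte load from the value's memory image would see.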
      uint64_t SrcSize = DL.getTypeAllocSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeAllocSizeInBits(DestIntTy);
      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If the load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
  // FIXME: Use better alignment.
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           1, false);
  return CGF.Builder.CreateLoad(Tmp);
}
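
// Illustrative example (hypothetical type, not from this file): on x86-64,
// a `struct { float x; float y; }` argument may be classified with a
// coerce-to type such as <2 x float> or double. When such an argument lives
// in memory, CreateCoercedLoad can load the 8-byte coerced value directly
// through a bitcast of the struct pointer, since the source size covers the
// destination and no shifting is needed.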

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
    if (LowAlignment)
      SI->setAlignment(1);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If the store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
    llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
    llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
    // FIXME: Use better alignment.
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             1, false);
  }
}

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}
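
// Illustrative example (hypothetical function, not from this file): given
//   struct Big { int a[8]; };
//   struct Big make(int n);
// on a target that returns Big indirectly, GetFunctionType below produces
// roughly
//   void (%struct.Big*, i32)
// - the return slot becomes a leading pointer parameter, the LLVM result
// type becomes void, and the sret attribute is attached separately by
// ConstructAttributeList.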

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  SmallVector<llvm::Type*, 8> argTypes;
  llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  // Add in all of the required arguments.
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
  if (FI.isVariadic()) {
    ie = it + FI.getRequiredArgs().getNumRequiredArgs();
  } else {
    ie = FI.arg_end();
  }
  for (; it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    // Insert a padding type to ensure proper alignment.
    if (llvm::Type *PaddingType = argAI.getPaddingType())
      argTypes.push_back(PaddingType);

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it. Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      llvm::Type *argType = argAI.getCoerceToType();
      if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  // Add the inalloca struct as the last parameter type.
  if (llvm::StructType *ArgStruct = FI.getArgStruct())
    argTypes.push_back(ArgStruct->getPointerTo());

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv,
                                           bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overriders.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
  if (CodeGenOpts.OptimizeSize == 2)
    FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls)
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
  } else {
    // Attributes that should go on the function, but not the call site.
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    }

    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));

    if (!CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("no-realign-stack");
  }

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    // FALL THROUGH
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);
    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::InAlloca: {
    // inalloca disables readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Indirect: {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(), Index, SRETAttrs));

    ++Index;
    // sret disables readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (RetAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(),
                                    llvm::AttributeSet::ReturnIndex,
                                    RetAttrs));

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    llvm::AttrBuilder Attrs;

    if (AI.getPaddingType()) {
      if (AI.getPaddingInReg())
        PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index,
                                              llvm::Attribute::InReg));
      // Increment Index if there is padding.
      ++Index;
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable. It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::ZExt);
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      // FIXME: handle sseregparm someday...

      if (llvm::StructType *STy =
              dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
        unsigned Extra = STy->getNumElements()-1;  // 1 will be added below.
        if (Attrs.hasAttributes())
          for (unsigned I = 0; I < Extra; ++I)
            PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index + I,
                                                  Attrs));
        Index += Extra;
      }
      break;

    case ABIArgInfo::Indirect:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attribute::ByVal);

      Attrs.addAlignmentAttr(AI.getIndirectAlign());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
          .removeAttribute(llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::InAlloca:
      // inalloca disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
          .removeAttribute(llvm::Attribute::ReadNone);
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (Attrs.hasAttributes())
      PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
    ++Index;
  }
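
  // Illustrative note on the Index bookkeeping above (hypothetical
  // signature, not from this file): for a function returning a struct
  // indirectly and taking one byval struct parameter, the sret pointer
  // occupies attribute index 1, so the byval parameter is attached at
  // index 2. Ignore and InAlloca arguments consume no index at all, and a
  // Direct argument coerced to an N-element struct consumes N indices.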

  // Add the inalloca attribute to the trailing inalloca parameter if present.
  if (FI.usesInAlloca()) {
    llvm::AttrBuilder Attrs;
    Attrs.addAttribute(llvm::Attribute::InAlloca);
    PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
  }

  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(),
                                    llvm::AttributeSet::FunctionIndex,
                                    FuncAttrs));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value. TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getReturnType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocas for param decls. Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // If we're using inalloca, all the memory arguments are GEPs off of the last
  // parameter, which is a pointer to the complete memory area.
  llvm::Value *ArgStruct = 0;
  if (FI.usesInAlloca()) {
    llvm::Function::arg_iterator EI = Fn->arg_end();
    --EI;
    ArgStruct = EI;
    assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
  }

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                        AI->getArgNo() + 1,
                                        llvm::Attribute::NoAlias));
    ++AI;
  }

  // Track whether we received the parameter as a pointer (indirect, byval, or
  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to
  // copy it into a local alloca for us.
  enum ValOrPointer { HaveValue = 0, HavePointer = 1 };
  typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr;
  SmallVector<ValueAndIsPtr, 16> ArgVals;
  ArgVals.reserve(Args.size());

  // Create a pointer value for every parameter declaration. This usually
  // entails copying one or more LLVM IR arguments into an alloca. Don't push
  // any cleanups or do anything that might unwind. We do that separately, so
  // we can push the cleanups in the correct order for the ABI.
  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    // Skip the dummy padding argument.
    if (ArgI.getPaddingType())
      ++AI;

    switch (ArgI.getKind()) {
    case ABIArgInfo::InAlloca: {
      llvm::Value *V = Builder.CreateStructGEP(
          ArgStruct, ArgI.getInAllocaFieldIndex(), Arg->getName());
      ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      continue;  // Don't increment AI!
    }

    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference. All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty,
                             Arg->getLocStart());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
      }
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {

      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                              AI->getArgNo() + 1,
                                              llvm::Attribute::NoAlias));

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        if (const CXXMethodDecl *MD =
                dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
          if (MD->isVirtual() && Arg == CXXABIThisDecl)
            V = CGM.getCXXABI().
                adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
        }

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type. Since we are codegening the callee in
        // here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());

      // The alignment we need to use is the max of the requested alignment
      // for the argument and the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                     (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                         llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (STy && STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy =
          cast<llvm::PointerType>(Ptr->getType())->getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        if (SrcSize <= DstSize) {
          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }
        } else {
          llvm::AllocaInst *TempAlloca =
            CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
          TempAlloca->setAlignment(AlignmentToUse);
          llvm::Value *TempV = TempAlloca;

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }

          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the
        // alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }


      // Match to what EmitParmDecl is expecting for this type.
      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart());
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
      } else {
        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      }
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
      CharUnits Align = getContext().getDeclAlign(Arg);
      Alloca->setAlignment(Align.getQuantity());
      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
      llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
      ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (!hasScalarEvaluationKind(Ty)) {
        ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
      } else {
        llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
        ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
      }

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }

  if (FI.usesInAlloca())
    ++AI;
  assert(AI == Fn->arg_end() && "Argument mismatch!");

  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)
      EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
                   I + 1);
  } else {
    for (unsigned I = 0, E = Args.size(); I != E; ++I)
      EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
                   I + 1);
  }
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // The result must be the last instruction emitted so far; i.e. the
  // insertion point must immediately follow it.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return 0;
  if (&BB->back() != result) return 0;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*,4> insnsToKill;

  // Look for:
  //   %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
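    // That is, we only peel back through IR of the form (illustrative):
    //   %generator2 = call i8* @objc_retain(i8* %x)
    //   %generator  = bitcast i8* %generator2 to %SomeTy*
    // with nothing in between; any intervening instruction defeats the match.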
    if (generator->getNextNode() != bitcast)
      return 0;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return 0;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call.  If we can't find it, we can't do this
    // optimization.  But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
    if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
               CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
      insnsToKill.push_back(prev);
    }
  } else {
    return 0;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method =
    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return 0;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return 0;

  // Look for a retain call.
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
    return 0;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
    return 0;

  // Okay!  Burn it all down.
  // This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain.  This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}

/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
  if (!CGF.ReturnValue->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return 0;
    llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
    if (!store) return 0;
    if (store->getPointerOperand() != CGF.ReturnValue) return 0;
    assert(!store->isAtomic() && !store->isVolatile()); // see below
    return store;
  }

  llvm::StoreInst *store =
    dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
  if (!store) return 0;

  // These aren't actually possible for non-coerced returns, and we
  // only care about non-coerced returns on this code path.
  assert(!store->isAtomic() && !store->isVolatile());

  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessors chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return 0;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         bool EmitRetDbgLoc,
                                         SourceLocation EndLoc) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::InAlloca:
    // Aggregates get evaluated directly into the destination.
    // Sometimes we need to return the sret value in a register, though.
    assert(hasAggregateEvaluationKind(RetTy));
    if (RetAI.getInAllocaSRet()) {
      llvm::Function::arg_iterator EI = CurFn->arg_end();
      --EI;
      llvm::Value *ArgStruct = EI;
      llvm::Value *SRet =
          Builder.CreateStructGEP(ArgStruct, RetAI.getInAllocaFieldIndex());
      RV = Builder.CreateLoad(SRet, "sret");
    }
    break;

  case ABIArgInfo::Indirect: {
    switch (getEvaluationKind(RetTy)) {
    case TEK_Complex: {
      ComplexPairTy RT =
        EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
                          EndLoc);
      EmitStoreOfComplex(RT,
                         MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
                         /*isInit*/ true);
      break;
    }
    case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
      break;
    case TEK_Scalar:
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
                        MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
                        /*isInit*/ true);
      break;
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp always will have pointer-to-return-type
      // type, just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
        // Reuse the debug location from the store unless there is
        // cleanup code to be emitted between the store and return
        // instruction.
        if (EmitRetDbgLoc && !AutoreleaseResult)
          RetDbgLoc = SI->getDebugLoc();
        // Get the stored value and nuke the now-dead store.
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      llvm::Value *V = ReturnValue;
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RetTy->isObjCRetainableType());
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ?
      Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}

static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}

static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) {
  // FIXME: Generate IR in one pass, rather than going back and fixing up these
  // placeholders.
  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Placeholder =
      llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
  Placeholder = CGF.Builder.CreateLoad(Placeholder);
  return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(),
                               Ty.getQualifiers(),
                               AggValueSlot::IsNotDestructed,
                               AggValueSlot::DoesNotNeedGCBarriers,
                               AggValueSlot::IsNotAliased);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param,
                                          SourceLocation loc) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to non-scalars are pointers directly to the aggregate.
  // I don't know why references to scalars are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (!hasScalarEvaluationKind(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  if (isInAllocaArgument(CGM.getCXXABI(), type)) {
    AggValueSlot Slot = createPlaceholderSlot(*this, type);
    Slot.setExternallyDestructed();

    // FIXME: Either emit a copy constructor call, or figure out how to do
    // guaranteed tail calls with perfect forwarding in LLVM.
    CGM.ErrorUnsupported(param, "non-trivial argument copy for thunk");
    EmitNullInitialization(Slot.getAddr(), type);

    RValue RV = Slot.asRValue();
    args.add(RV, type);
    return;
  }

  args.add(convertTempToRValue(local, type, loc), type);
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  const LValue &srcLV = writeback.Source;
  llvm::Value *srcAddr = srcLV.getAddress();
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = 0;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
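  // The emitted control flow is roughly (illustrative IR):
  //   %isnull = icmp eq i8** %src, null
  //   br i1 %isnull, label %icr.done, label %icr.writeback
  // with the store itself performed in %icr.writeback.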
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to writeback.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value,
               cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
                                    "icr.writeback-cast");

  // Perform the writeback.

  // If we have a "to use" value, it's something we need to emit a use
  // of.  This has to be carefully threaded in: if it's done after the
  // release it's potentially undefined behavior (and the optimizer
  // will ignore it), and if it happens before the retain then the
  // optimizer could move the release there.
  if (writeback.ToUse) {
    assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);

    // Retain the new value.  No need to block-copy here: the block's
    // being passed up the stack.
    value = CGF.EmitARCRetainNonBlock(value);

    // Emit the intrinsic use here.
    CGF.EmitARCIntrinsicUse(writeback.ToUse);

    // Load the old value (primitively).
    llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());

    // Put the new value in place (primitively).
    CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);

    // Release the old value.
    CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());

  // Otherwise, we can just do a normal lvalue store.
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
  }

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (CallArgList::writeback_iterator
         i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
    emitWriteback(CGF, *i);
}

static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
                                            const CallArgList &CallArgs) {
  assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
      CallArgs.getCleanupsToDeactivate();
  // Iterate in reverse to increase the likelihood of popping the cleanup.
  for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
           I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
    CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
    I->IsActiveIP->eraseFromParent();
  }
}

static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();
  return 0;
}

/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of an __autoreleased temporary; it
/// might be copy-initialized with the current value of the given
/// address, but it will definitely be copied out of after the call.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  LValue srcLV;

  // Make an optimistic effort to emit the address as an l-value.
  // This can fail if the argument expression is more complicated.
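  // A typical source pattern (under ARC) that reaches here is passing the
  // address of a __strong local to an out parameter, e.g.
  //   NSError *err; [obj doThing:&err];
  // where the parameter has type NSError * __autoreleasing *; Sema wraps
  // such arguments in an ObjCIndirectCopyRestoreExpr.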
  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
    srcLV = CGF.EmitLValue(lvExpr);

  // Otherwise, just emit it as a scalar.
  } else {
    llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

    QualType srcAddrType =
      CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
    srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
  }
  llvm::Value *srcAddr = srcLV.getAddress();

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");
  // Loading an l-value can introduce a cleanup if the l-value is __weak,
  // and that cleanup will be conditional if we can't prove that the l-value
  // isn't null, so we need to register a dominating point so that the cleanups
  // system will make valid IR.
  CodeGenFunction::ConditionalEvaluation condEval(CGF);

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = 0;
  llvm::BasicBlock *originBB = 0;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                   llvm::ConstantPointerNull::get(destType),
                                             temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      originBB = CGF.Builder.GetInsertBlock();
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
      condEval.begin(CGF);
    }
  }

  llvm::Value *valueToUse = 0;

  // Perform a copy if necessary.
  if (shouldCopy) {
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);

    // If optimization is enabled, and the value was held in a
    // __strong variable, we need to tell the optimizer that this
    // value has to stay alive until we're doing the store back.
    // This is because the temporary is effectively unretained,
    // and so otherwise we can violate the high-level semantics.
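    // (The value recorded here is threaded through addWriteback below and
    // ultimately handed to EmitARCIntrinsicUse in emitWriteback, which is
    // what actually keeps it alive for the optimizer.)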
    if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
        srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
      valueToUse = src;
    }
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull) {
    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
    CGF.EmitBlock(contBB);

    // Make a phi for the value to intrinsically use.
    if (valueToUse) {
      llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
                                                      "icr.to-use");
      phiToUse->addIncoming(valueToUse, copyBB);
      phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
                            originBB);
      valueToUse = phiToUse;
    }

    condEval.end(CGF);
  }

  args.addWriteback(srcLV, temp, valueToUse);
  args.add(RValue::get(finalArgument), CRE->getType());
}

void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
  assert(!StackBase && !StackCleanup.isValid());

  // Save the stack.
  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
  StackBase = CGF.Builder.CreateCall(F, "inalloca.save");

  // Control gets really tied up in landing pads, so we have to spill the
  // stacksave to an alloca to avoid violating SSA form.
  // TODO: This is dead if we never emit the cleanup.  We should create the
  // alloca and store lazily on the first cleanup emission.
  StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem");
  CGF.Builder.CreateStore(StackBase, StackBaseMem);
  CGF.pushStackRestore(EHCleanup, StackBaseMem);
  StackCleanup = CGF.EHStack.getInnermostEHScope();
  assert(StackCleanup.isValid());
}

void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
  if (StackBase) {
    CGF.DeactivateCleanupBlock(StackCleanup, StackBase);
    llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
    // We could load StackBase from StackBaseMem, but in the non-exceptional
    // case we can skip it.
    CGF.Builder.CreateCall(F, StackBase);
  }
}

void CodeGenFunction::EmitCallArgs(CallArgList &Args,
                                   ArrayRef<QualType> ArgTypes,
                                   CallExpr::const_arg_iterator ArgBeg,
                                   CallExpr::const_arg_iterator ArgEnd,
                                   bool ForceColumnInfo) {
  CGDebugInfo *DI = getDebugInfo();
  SourceLocation CallLoc;
  if (DI) CallLoc = DI->getLocation();

  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
  // because arguments are destroyed left to right in the callee.
  if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    // Insert a stack save if we're going to need any inalloca args.
    bool HasInAllocaArgs = false;
    for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
         I != E && !HasInAllocaArgs; ++I)
      HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
    if (HasInAllocaArgs) {
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      Args.allocateArgumentMemory(*this);
    }

    // Evaluate each argument.
    size_t CallArgsStart = Args.size();
    for (int I = ArgTypes.size() - 1; I >= 0; --I) {
      CallExpr::const_arg_iterator Arg = ArgBeg + I;
      EmitCallArg(Args, *Arg, ArgTypes[I]);
      // Restore the debug location.
      if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
    }

    // Un-reverse the arguments we just evaluated so they match up with the
    // LLVM IR function.
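    // (Evaluation above went right-to-left to satisfy the MS C++ ABI's
    // destruction order; the CallArgList itself must stay in source order.)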
    std::reverse(Args.begin() + CallArgsStart, Args.end());
    return;
  }

  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
    CallExpr::const_arg_iterator Arg = ArgBeg + I;
    assert(Arg != ArgEnd);
    EmitCallArg(Args, *Arg, ArgTypes[I]);
    // Restore the debug location.
    if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
  }
}

namespace {

struct DestroyUnpassedArg : EHScopeStack::Cleanup {
  DestroyUnpassedArg(llvm::Value *Addr, QualType Ty)
      : Addr(Addr), Ty(Ty) {}

  llvm::Value *Addr;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) {
    const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
    assert(!Dtor->isTrivial());
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
                              /*Delegating=*/false, Addr);
  }
};

}

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getLangOpts().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E), type);
  }

  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);

  // In the Microsoft C++ ABI, aggregate arguments are destructed by the
  // callee.  However, we still have to push an EH-only cleanup in case we
  // unwind before we make it to the call.
  if (HasAggregateEvalKind && args.isUsingInAlloca()) {
    assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
    AggValueSlot Slot = createPlaceholderSlot(*this, type);
    Slot.setExternallyDestructed();
    EmitAggExpr(E, Slot);
    RValue RV = Slot.asRValue();
    args.add(RV, type);

    const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
    if (RD->hasNonTrivialDestructor()) {
      // Create a no-op GEP between the placeholder and the cleanup so we can
      // RAUW it successfully.  It also serves as a marker of the first
      // instruction where the cleanup is active.
      pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type);
      // This unreachable is a temporary marker which will be removed later.
      llvm::Instruction *IsActive = Builder.CreateUnreachable();
      args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
    }
    return;
  }

  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
      args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
    } else {
      // We can't represent a misaligned lvalue in the CallArgList, so copy
      // to an aligned temporary now.
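      // (CreateMemTemp produces an alloca with the type's natural alignment,
      // so passing the copy is safe even though the original lvalue was
      // under-aligned.)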
      llvm::Value *tmp = CreateMemTemp(type);
      EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(),
                        L.getAlignment());
      args.add(RValue::getAggregate(tmp), type);
    }
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}

/// Emits a call to the given no-arguments nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         const llvm::Twine &name) {
  return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
}

/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const llvm::Twine &name) {
  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
  call->setDoesNotThrow();
  return call;
}

/// Emits a simple call (never an invoke) to the given no-arguments
/// runtime function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 const llvm::Twine &name) {
  return EmitRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
}

/// Emits a simple call (never an invoke) to the given runtime
/// function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 ArrayRef<llvm::Value*> args,
                                 const llvm::Twine &name) {
  llvm::CallInst *call = Builder.CreateCall(callee, args, name);
  call->setCallingConv(getRuntimeCC());
  return call;
}

/// Emits a call or invoke to the given noreturn runtime function.
void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
                                               ArrayRef<llvm::Value*> args) {
  if (getInvokeDest()) {
    llvm::InvokeInst *invoke =
      Builder.CreateInvoke(callee,
                           getUnreachableBlock(),
                           getInvokeDest(),
                           args);
    invoke->setDoesNotReturn();
    invoke->setCallingConv(getRuntimeCC());
  } else {
    llvm::CallInst *call = Builder.CreateCall(callee, args);
    call->setDoesNotReturn();
    call->setCallingConv(getRuntimeCC());
    Builder.CreateUnreachable();
  }
  PGO.setCurrentRegionUnreachable();
}

/// Emits a call or invoke instruction to the given nullary runtime
/// function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         const Twine &name) {
  return EmitRuntimeCallOrInvoke(callee, ArrayRef<llvm::Value*>(), name);
}

/// Emits a call or invoke instruction to the given runtime function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const Twine &name) {
  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
  callSite.setCallingConv(getRuntimeCC());
  return callSite;
}

llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();

  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return Inst;
}

static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
                            llvm::FunctionType *FTy) {
  if (ArgNo < FTy->getNumParams())
    assert(Elt->getType() == FTy->getParamType(ArgNo));
  else
    assert(FTy->isVarArg());
  ++ArgNo;
}

void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                       SmallVectorImpl<llvm::Value *> &Args,
                                       llvm::FunctionType *IRFuncTy) {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    llvm::Value *Addr = RV.getAggregateAddr();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
      RValue EltRV = convertTempToRValue(EltAddr, EltTy, SourceLocation());
      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
    LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);

    if (RD->isUnion()) {
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        RValue FldRV = EmitRValueForField(LV, LargestFD, SourceLocation());
        ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        FieldDecl *FD = *i;

        RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
        ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
      }
    }
  } else if (Ty->isAnyComplexType()) {
    ComplexPairTy CV = RV.getComplexVal();
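    // A _Complex double, for example, expands to two double arguments:
    // the real part followed by the imaginary part.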
    Args.push_back(CV.first);
    Args.push_back(CV.second);
  } else {
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (Args.size() < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(Args.size()))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));

    Args.push_back(V);
  }
}

/// \brief Store a non-aggregate value to an address to initialize it.  For
/// initialization, a non-atomic store will be used.
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
                                        LValue Dst) {
  if (Src.isScalar())
    CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
  else
    CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
}

void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
                                                  llvm::Value *New) {
  DeferredReplacements.push_back(std::make_pair(Old, New));
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // IRArgNo - Keep track of the argument number in the callee we're looking at.
  unsigned IRArgNo = 0;
  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
                  cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
  llvm::Value *ArgMemory = 0;
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    llvm::AllocaInst *AI = new llvm::AllocaInst(
        ArgStruct, "argmem", CallArgs.getStackBase()->getNextNode());
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = AI;
  }

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  llvm::Value *SRetPtr = 0;
  if (CGM.ReturnTypeUsesSRet(CallInfo) || RetAI.isInAlloca()) {
    SRetPtr = ReturnValue.getValue();
    if (!SRetPtr)
      SRetPtr = CreateMemTemp(RetTy);
    if (CGM.ReturnTypeUsesSRet(CallInfo)) {
      Args.push_back(SRetPtr);
      checkArgMatches(SRetPtr, IRArgNo, IRFuncTy);
    } else {
      llvm::Value *Addr =
          Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr, Addr);
    }
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);

    // Insert a padding argument to ensure proper alignment.
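    // (The padding value is undef on purpose: it exists only to occupy a
    // register or stack slot so the real argument lands where the ABI
    // expects it.)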
    if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
      Args.push_back(llvm::UndefValue::get(PaddingType));
      ++IRArgNo;
    }

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (RV.isAggregate()) {
        // Replace the placeholder with the appropriate argument slot GEP.
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(RV.getAggregateAddr());
        CGBuilderTy::InsertPoint IP = Builder.saveIP();
        Builder.SetInsertPoint(Placeholder);
        llvm::Value *Addr = Builder.CreateStructGEP(
            ArgMemory, ArgInfo.getInAllocaFieldIndex());
        Builder.restoreIP(IP);
        deferPlaceholderReplacement(Placeholder, Addr);
      } else {
        // Store the RValue into the argument struct.
        llvm::Value *Addr =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      }
      break;  // Don't increment IRArgNo!
    }

    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        Args.push_back(AI);

        LValue argLV = MakeAddrLValue(Args.back(), I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);

        // Validate argument match.
        checkArgMatches(AI, IRArgNo, IRFuncTy);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is located in an address space
        //    different than that of the argument (0).
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
        const unsigned ArgAddrSpace = (IRArgNo < IRFuncTy->getNumParams() ?
          IRFuncTy->getParamType(IRArgNo)->getPointerAddressSpace() : 0);
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) ||
            (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          Args.push_back(AI);
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());

          // Validate argument match.
          checkArgMatches(AI, IRArgNo, IRFuncTy);
        } else {
          // Skip the extra memcpy call.
          Args.push_back(Addr);

          // Validate argument match.
          checkArgMatches(Addr, IRArgNo, IRFuncTy);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
        if (IRArgNo < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(IRArgNo))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
        Args.push_back(V);

        checkArgMatches(V, IRArgNo, IRFuncTy);
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar() || RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        llvm::Type *SrcTy =
          cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it.  The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          llvm::AllocaInst *TempAlloca
            = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
          SrcPtr = TempAlloca;
        } else {
          SrcPtr = Builder.CreateBitCast(SrcPtr,
                                         llvm::PointerType::getUnqual(STy));
        }

        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);

          // Validate argument match.
          checkArgMatches(LI, IRArgNo, IRFuncTy);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));

        // Validate argument match.
        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
      IRArgNo = Args.size();
      break;
    }
  }

  if (ArgMemory) {
    llvm::Value *Arg = ArgMemory;
    llvm::Type *LastParamTy =
        IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
    if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
      // Assert that these structs have equivalent element types.
      llvm::StructType *FullTy = CallInfo.getArgStruct();
      llvm::StructType *Prefix = cast<llvm::StructType>(
          cast<llvm::PointerType>(LastParamTy)->getElementType());

      // For variadic functions, the caller might supply a larger struct than
      // the callee expects, and that's OK.
      assert(Prefix->getNumElements() == FullTy->getNumElements() ||
             (CallInfo.isVariadic() &&
              Prefix->getNumElements() <= FullTy->getNumElements()));

      for (llvm::StructType::element_iterator PI = Prefix->element_begin(),
                                              PE = Prefix->element_end(),
                                              FI = FullTy->element_begin();
           PI != PE; ++PI, ++FI)
        assert(*PI == *FI);
#endif
      Arg = Builder.CreateBitCast(Arg, LastParamTy);
    }
    Args.push_back(Arg);
  }

  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
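        // For example, calling a function declared without a prototype, as in
        // "void foo(); ... foo(1, 2);", goes through a bitcast of @foo; when
        // the argument types line up exactly, we call @foo directly instead.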
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
                             CallingConv, true);
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
                                                     AttributeList);

  llvm::BasicBlock *InvokeDest = 0;
  if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
                          llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  switch (RetAI.getKind()) {
  case ABIArgInfo::InAlloca:
  case ABIArgInfo::Indirect:
    return convertTempToRValue(SRetPtr, RetTy, SourceLocation());

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    llvm::Type *RetIRTy = ConvertType(RetTy);
    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
      switch (getEvaluationKind(RetTy)) {
      case TEK_Complex: {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      case TEK_Aggregate: {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
        return RValue::getAggregate(DestPtr);
      }
      case TEK_Scalar: {
        // If the argument doesn't match, perform a bitcast to coerce it.
This 2925 // can happen due to trivial type mismatches. 2926 llvm::Value *V = CI; 2927 if (V->getType() != RetIRTy) 2928 V = Builder.CreateBitCast(V, RetIRTy); 2929 return RValue::get(V); 2930 } 2931 } 2932 llvm_unreachable("bad evaluation kind"); 2933 } 2934 2935 llvm::Value *DestPtr = ReturnValue.getValue(); 2936 bool DestIsVolatile = ReturnValue.isVolatile(); 2937 2938 if (!DestPtr) { 2939 DestPtr = CreateMemTemp(RetTy, "coerce"); 2940 DestIsVolatile = false; 2941 } 2942 2943 // If the value is offset in memory, apply the offset now. 2944 llvm::Value *StorePtr = DestPtr; 2945 if (unsigned Offs = RetAI.getDirectOffset()) { 2946 StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy()); 2947 StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs); 2948 StorePtr = Builder.CreateBitCast(StorePtr, 2949 llvm::PointerType::getUnqual(RetAI.getCoerceToType())); 2950 } 2951 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this); 2952 2953 return convertTempToRValue(DestPtr, RetTy, SourceLocation()); 2954 } 2955 2956 case ABIArgInfo::Expand: 2957 llvm_unreachable("Invalid ABI kind for return argument"); 2958 } 2959 2960 llvm_unreachable("Unhandled ABIArgInfo::Kind"); 2961 } 2962 2963 /* VarArg handling */ 2964 2965 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) { 2966 return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this); 2967 } 2968