1 //===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // These classes wrap the information about a call or function 11 // definition used to handle ABI compliancy. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "CGCall.h" 16 #include "CGCXXABI.h" 17 #include "ABIInfo.h" 18 #include "CodeGenFunction.h" 19 #include "CodeGenModule.h" 20 #include "TargetInfo.h" 21 #include "clang/Basic/TargetInfo.h" 22 #include "clang/AST/Decl.h" 23 #include "clang/AST/DeclCXX.h" 24 #include "clang/AST/DeclObjC.h" 25 #include "clang/Frontend/CodeGenOptions.h" 26 #include "llvm/Attributes.h" 27 #include "llvm/Support/CallSite.h" 28 #include "llvm/Target/TargetData.h" 29 #include "llvm/InlineAsm.h" 30 #include "llvm/Transforms/Utils/Local.h" 31 using namespace clang; 32 using namespace CodeGen; 33 34 /***/ 35 36 static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) { 37 switch (CC) { 38 default: return llvm::CallingConv::C; 39 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall; 40 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall; 41 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall; 42 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS; 43 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 44 // TODO: add support for CC_X86Pascal to llvm 45 } 46 } 47 48 /// Derives the 'this' type for codegen purposes, i.e. ignoring method 49 /// qualification. 50 /// FIXME: address space qualification? 51 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) { 52 QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal(); 53 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy)); 54 } 55 56 /// Returns the canonical formal type of the given C++ method. 57 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) { 58 return MD->getType()->getCanonicalTypeUnqualified() 59 .getAs<FunctionProtoType>(); 60 } 61 62 /// Returns the "extra-canonicalized" return type, which discards 63 /// qualifiers on the return type. Codegen doesn't care about them, 64 /// and it makes ABI code a little easier to be able to assume that 65 /// all parameter and return types are top-level unqualified. 66 static CanQualType GetReturnType(QualType RetTy) { 67 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); 68 } 69 70 /// Arrange the argument and result information for a value of the 71 /// given unprototyped function type. 72 const CGFunctionInfo & 73 CodeGenTypes::arrangeFunctionType(CanQual<FunctionNoProtoType> FTNP) { 74 // When translating an unprototyped function type, always use a 75 // variadic type. 76 return arrangeFunctionType(FTNP->getResultType().getUnqualifiedType(), 77 ArrayRef<CanQualType>(), 78 FTNP->getExtInfo(), 79 RequiredArgs(0)); 80 } 81 82 /// Arrange the argument and result information for a value of the 83 /// given function type, on top of any implicit parameters already 84 /// stored. 85 static const CGFunctionInfo &arrangeFunctionType(CodeGenTypes &CGT, 86 SmallVectorImpl<CanQualType> &argTypes, 87 CanQual<FunctionProtoType> FTP) { 88 RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size()); 89 // FIXME: Kill copy. 
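  // Append the prototype's formal parameter types after any implicit arguments
  // (e.g. the 'this' pointer) the caller has already pushed.  'required' records
  // how many leading arguments every call must supply; for a variadic prototype,
  // calls may pass additional arguments beyond that count.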
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    argTypes.push_back(FTP->getArgType(i));
  CanQualType resultType = FTP->getResultType().getUnqualifiedType();
  return CGT.arrangeFunctionType(resultType, argTypes,
                                 FTP->getExtInfo(), required);
}

/// Arrange the argument and result information for a value of the
/// given function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeFunctionType(*this, argTypes, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(GetThisType(Context, RD));

  return ::arrangeFunctionType(*this, argTypes,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
  }

  return arrangeFunctionType(prototype);
}

/// Arrange the argument and result information for a declaration
/// or definition of the given constructor variant.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
                                               CXXCtorType ctorKind) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    argTypes.push_back(FTP->getArgType(i));

  return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(), required);
}

/// Arrange the argument and result information for a declaration,
/// definition, or call to the given destructor variant.
It so 184 /// happens that all three cases produce the same information. 185 const CGFunctionInfo & 186 CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D, 187 CXXDtorType dtorKind) { 188 SmallVector<CanQualType, 2> argTypes; 189 argTypes.push_back(GetThisType(Context, D->getParent())); 190 CanQualType resultType = Context.VoidTy; 191 192 TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes); 193 194 CanQual<FunctionProtoType> FTP = GetFormalType(D); 195 assert(FTP->getNumArgs() == 0 && "dtor with formal parameters"); 196 197 return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(), 198 RequiredArgs::All); 199 } 200 201 /// Arrange the argument and result information for the declaration or 202 /// definition of the given function. 203 const CGFunctionInfo & 204 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { 205 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) 206 if (MD->isInstance()) 207 return arrangeCXXMethodDeclaration(MD); 208 209 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified(); 210 211 assert(isa<FunctionType>(FTy)); 212 213 // When declaring a function without a prototype, always use a 214 // non-variadic type. 215 if (isa<FunctionNoProtoType>(FTy)) { 216 CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>(); 217 return arrangeFunctionType(noProto->getResultType(), 218 ArrayRef<CanQualType>(), 219 noProto->getExtInfo(), 220 RequiredArgs::All); 221 } 222 223 assert(isa<FunctionProtoType>(FTy)); 224 return arrangeFunctionType(FTy.getAs<FunctionProtoType>()); 225 } 226 227 /// Arrange the argument and result information for the declaration or 228 /// definition of an Objective-C method. 229 const CGFunctionInfo & 230 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) { 231 // It happens that this is the same as a call with no optional 232 // arguments, except also using the formal 'self' type. 233 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType()); 234 } 235 236 /// Arrange the argument and result information for the function type 237 /// through which to perform a send to the given Objective-C method, 238 /// using the given receiver type. The receiver type is not always 239 /// the 'self' type of the method or even an Objective-C pointer type. 240 /// This is *not* the right method for actually performing such a 241 /// message send, due to the possibility of optional arguments. 242 const CGFunctionInfo & 243 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, 244 QualType receiverType) { 245 SmallVector<CanQualType, 16> argTys; 246 argTys.push_back(Context.getCanonicalParamType(receiverType)); 247 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType())); 248 // FIXME: Kill copy? 249 for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(), 250 e = MD->param_end(); i != e; ++i) { 251 argTys.push_back(Context.getCanonicalParamType((*i)->getType())); 252 } 253 254 FunctionType::ExtInfo einfo; 255 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD)); 256 257 if (getContext().getLangOptions().ObjCAutoRefCount && 258 MD->hasAttr<NSReturnsRetainedAttr>()) 259 einfo = einfo.withProducesResult(true); 260 261 RequiredArgs required = 262 (MD->isVariadic() ? 
RequiredArgs(argTys.size()) : RequiredArgs::All); 263 264 return arrangeFunctionType(GetReturnType(MD->getResultType()), argTys, 265 einfo, required); 266 } 267 268 const CGFunctionInfo & 269 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { 270 // FIXME: Do we need to handle ObjCMethodDecl? 271 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); 272 273 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) 274 return arrangeCXXConstructorDeclaration(CD, GD.getCtorType()); 275 276 if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) 277 return arrangeCXXDestructor(DD, GD.getDtorType()); 278 279 return arrangeFunctionDeclaration(FD); 280 } 281 282 /// Figure out the rules for calling a function with the given formal 283 /// type using the given arguments. The arguments are necessary 284 /// because the function might be unprototyped, in which case it's 285 /// target-dependent in crazy ways. 286 const CGFunctionInfo & 287 CodeGenTypes::arrangeFunctionCall(const CallArgList &args, 288 const FunctionType *fnType) { 289 RequiredArgs required = RequiredArgs::All; 290 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) { 291 if (proto->isVariadic()) 292 required = RequiredArgs(proto->getNumArgs()); 293 } else if (CGM.getTargetCodeGenInfo() 294 .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) { 295 required = RequiredArgs(0); 296 } 297 298 return arrangeFunctionCall(fnType->getResultType(), args, 299 fnType->getExtInfo(), required); 300 } 301 302 const CGFunctionInfo & 303 CodeGenTypes::arrangeFunctionCall(QualType resultType, 304 const CallArgList &args, 305 const FunctionType::ExtInfo &info, 306 RequiredArgs required) { 307 // FIXME: Kill copy. 308 SmallVector<CanQualType, 16> argTypes; 309 for (CallArgList::const_iterator i = args.begin(), e = args.end(); 310 i != e; ++i) 311 argTypes.push_back(Context.getCanonicalParamType(i->Ty)); 312 return arrangeFunctionType(GetReturnType(resultType), argTypes, info, 313 required); 314 } 315 316 const CGFunctionInfo & 317 CodeGenTypes::arrangeFunctionDeclaration(QualType resultType, 318 const FunctionArgList &args, 319 const FunctionType::ExtInfo &info, 320 bool isVariadic) { 321 // FIXME: Kill copy. 322 SmallVector<CanQualType, 16> argTypes; 323 for (FunctionArgList::const_iterator i = args.begin(), e = args.end(); 324 i != e; ++i) 325 argTypes.push_back(Context.getCanonicalParamType((*i)->getType())); 326 327 RequiredArgs required = 328 (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All); 329 return arrangeFunctionType(GetReturnType(resultType), argTypes, info, 330 required); 331 } 332 333 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() { 334 return arrangeFunctionType(getContext().VoidTy, ArrayRef<CanQualType>(), 335 FunctionType::ExtInfo(), RequiredArgs::All); 336 } 337 338 /// Arrange the argument and result information for an abstract value 339 /// of a given function type. This is the method which all of the 340 /// above functions ultimately defer to. 341 const CGFunctionInfo & 342 CodeGenTypes::arrangeFunctionType(CanQualType resultType, 343 ArrayRef<CanQualType> argTypes, 344 const FunctionType::ExtInfo &info, 345 RequiredArgs required) { 346 #ifndef NDEBUG 347 for (ArrayRef<CanQualType>::const_iterator 348 I = argTypes.begin(), E = argTypes.end(); I != E; ++I) 349 assert(I->isCanonicalAsParam()); 350 #endif 351 352 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC()); 353 354 // Lookup or create unique function info. 
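  // CGFunctionInfo objects are uniqued in a FoldingSet keyed on the ABI-relevant
  // properties (extension info, required-argument count, result type, and
  // argument types), so arranging the same signature twice yields the same entry.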
355 llvm::FoldingSetNodeID ID; 356 CGFunctionInfo::Profile(ID, info, required, resultType, argTypes); 357 358 void *insertPos = 0; 359 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos); 360 if (FI) 361 return *FI; 362 363 // Construct the function info. We co-allocate the ArgInfos. 364 FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required); 365 FunctionInfos.InsertNode(FI, insertPos); 366 367 bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted; 368 assert(inserted && "Recursively being processed?"); 369 370 // Compute ABI information. 371 getABIInfo().computeInfo(*FI); 372 373 // Loop over all of the computed argument and return value info. If any of 374 // them are direct or extend without a specified coerce type, specify the 375 // default now. 376 ABIArgInfo &retInfo = FI->getReturnInfo(); 377 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0) 378 retInfo.setCoerceToType(ConvertType(FI->getReturnType())); 379 380 for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end(); 381 I != E; ++I) 382 if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0) 383 I->info.setCoerceToType(ConvertType(I->type)); 384 385 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased; 386 assert(erased && "Not in set?"); 387 388 return *FI; 389 } 390 391 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, 392 const FunctionType::ExtInfo &info, 393 CanQualType resultType, 394 ArrayRef<CanQualType> argTypes, 395 RequiredArgs required) { 396 void *buffer = operator new(sizeof(CGFunctionInfo) + 397 sizeof(ArgInfo) * (argTypes.size() + 1)); 398 CGFunctionInfo *FI = new(buffer) CGFunctionInfo(); 399 FI->CallingConvention = llvmCC; 400 FI->EffectiveCallingConvention = llvmCC; 401 FI->ASTCallingConvention = info.getCC(); 402 FI->NoReturn = info.getNoReturn(); 403 FI->ReturnsRetained = info.getProducesResult(); 404 FI->Required = required; 405 FI->HasRegParm = info.getHasRegParm(); 406 FI->RegParm = info.getRegParm(); 407 FI->NumArgs = argTypes.size(); 408 FI->getArgsBuffer()[0].type = resultType; 409 for (unsigned i = 0, e = argTypes.size(); i != e; ++i) 410 FI->getArgsBuffer()[i + 1].type = argTypes[i]; 411 return FI; 412 } 413 414 /***/ 415 416 void CodeGenTypes::GetExpandedTypes(QualType type, 417 SmallVectorImpl<llvm::Type*> &expandedTypes) { 418 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) { 419 uint64_t NumElts = AT->getSize().getZExtValue(); 420 for (uint64_t Elt = 0; Elt < NumElts; ++Elt) 421 GetExpandedTypes(AT->getElementType(), expandedTypes); 422 } else if (const RecordType *RT = type->getAsStructureType()) { 423 const RecordDecl *RD = RT->getDecl(); 424 assert(!RD->hasFlexibleArrayMember() && 425 "Cannot expand structure with flexible array."); 426 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 427 i != e; ++i) { 428 const FieldDecl *FD = *i; 429 assert(!FD->isBitField() && 430 "Cannot expand structure with bit-field members."); 431 GetExpandedTypes(FD->getType(), expandedTypes); 432 } 433 } else if (const ComplexType *CT = type->getAs<ComplexType>()) { 434 llvm::Type *EltTy = ConvertType(CT->getElementType()); 435 expandedTypes.push_back(EltTy); 436 expandedTypes.push_back(EltTy); 437 } else 438 expandedTypes.push_back(ConvertType(type)); 439 } 440 441 llvm::Function::arg_iterator 442 CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV, 443 llvm::Function::arg_iterator AI) { 444 assert(LV.isSimple() && 445 "Unexpected non-simple lvalue 
during struct expansion."); 446 llvm::Value *Addr = LV.getAddress(); 447 448 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 449 unsigned NumElts = AT->getSize().getZExtValue(); 450 QualType EltTy = AT->getElementType(); 451 for (unsigned Elt = 0; Elt < NumElts; ++Elt) { 452 llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt); 453 LValue LV = MakeAddrLValue(EltAddr, EltTy); 454 AI = ExpandTypeFromArgs(EltTy, LV, AI); 455 } 456 } else if (const RecordType *RT = Ty->getAsStructureType()) { 457 RecordDecl *RD = RT->getDecl(); 458 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 459 i != e; ++i) { 460 FieldDecl *FD = *i; 461 QualType FT = FD->getType(); 462 463 // FIXME: What are the right qualifiers here? 464 LValue LV = EmitLValueForField(Addr, FD, 0); 465 AI = ExpandTypeFromArgs(FT, LV, AI); 466 } 467 } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 468 QualType EltTy = CT->getElementType(); 469 llvm::Value *RealAddr = Builder.CreateStructGEP(Addr, 0, "real"); 470 EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy)); 471 llvm::Value *ImagAddr = Builder.CreateStructGEP(Addr, 1, "imag"); 472 EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy)); 473 } else { 474 EmitStoreThroughLValue(RValue::get(AI), LV); 475 ++AI; 476 } 477 478 return AI; 479 } 480 481 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are 482 /// accessing some number of bytes out of it, try to gep into the struct to get 483 /// at its inner goodness. Dive as deep as possible without entering an element 484 /// with an in-memory size smaller than DstSize. 485 static llvm::Value * 486 EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr, 487 llvm::StructType *SrcSTy, 488 uint64_t DstSize, CodeGenFunction &CGF) { 489 // We can't dive into a zero-element struct. 490 if (SrcSTy->getNumElements() == 0) return SrcPtr; 491 492 llvm::Type *FirstElt = SrcSTy->getElementType(0); 493 494 // If the first elt is at least as large as what we're looking for, or if the 495 // first element is the same size as the whole struct, we can enter it. 496 uint64_t FirstEltSize = 497 CGF.CGM.getTargetData().getTypeAllocSize(FirstElt); 498 if (FirstEltSize < DstSize && 499 FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy)) 500 return SrcPtr; 501 502 // GEP into the first element. 503 SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive"); 504 505 // If the first element is a struct, recurse. 506 llvm::Type *SrcTy = 507 cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); 508 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) 509 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF); 510 511 return SrcPtr; 512 } 513 514 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both 515 /// are either integers or pointers. This does a truncation of the value if it 516 /// is too large or a zero extension if it is too small. 517 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, 518 llvm::Type *Ty, 519 CodeGenFunction &CGF) { 520 if (Val->getType() == Ty) 521 return Val; 522 523 if (isa<llvm::PointerType>(Val->getType())) { 524 // If this is Pointer->Pointer avoid conversion to and from int. 525 if (isa<llvm::PointerType>(Ty)) 526 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val"); 527 528 // Convert the pointer to an integer so we can play with its width. 
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Value *Casted =
    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  llvm::StoreInst *Store =
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  // FIXME: Use better alignment / avoid requiring aligned store.
  Store->setAlignment(1);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
687 Load->setAlignment(1); 688 CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile); 689 } 690 } 691 692 /***/ 693 694 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { 695 return FI.getReturnInfo().isIndirect(); 696 } 697 698 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { 699 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { 700 switch (BT->getKind()) { 701 default: 702 return false; 703 case BuiltinType::Float: 704 return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float); 705 case BuiltinType::Double: 706 return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double); 707 case BuiltinType::LongDouble: 708 return getContext().getTargetInfo().useObjCFPRetForRealType( 709 TargetInfo::LongDouble); 710 } 711 } 712 713 return false; 714 } 715 716 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { 717 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { 718 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { 719 if (BT->getKind() == BuiltinType::LongDouble) 720 return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble(); 721 } 722 } 723 724 return false; 725 } 726 727 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { 728 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); 729 return GetFunctionType(FI); 730 } 731 732 llvm::FunctionType * 733 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { 734 735 bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted; 736 assert(Inserted && "Recursively being processed?"); 737 738 SmallVector<llvm::Type*, 8> argTypes; 739 llvm::Type *resultType = 0; 740 741 const ABIArgInfo &retAI = FI.getReturnInfo(); 742 switch (retAI.getKind()) { 743 case ABIArgInfo::Expand: 744 llvm_unreachable("Invalid ABI kind for return argument"); 745 746 case ABIArgInfo::Extend: 747 case ABIArgInfo::Direct: 748 resultType = retAI.getCoerceToType(); 749 break; 750 751 case ABIArgInfo::Indirect: { 752 assert(!retAI.getIndirectAlign() && "Align unused on indirect return."); 753 resultType = llvm::Type::getVoidTy(getLLVMContext()); 754 755 QualType ret = FI.getReturnType(); 756 llvm::Type *ty = ConvertType(ret); 757 unsigned addressSpace = Context.getTargetAddressSpace(ret); 758 argTypes.push_back(llvm::PointerType::get(ty, addressSpace)); 759 break; 760 } 761 762 case ABIArgInfo::Ignore: 763 resultType = llvm::Type::getVoidTy(getLLVMContext()); 764 break; 765 } 766 767 for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 768 ie = FI.arg_end(); it != ie; ++it) { 769 const ABIArgInfo &argAI = it->info; 770 771 switch (argAI.getKind()) { 772 case ABIArgInfo::Ignore: 773 break; 774 775 case ABIArgInfo::Indirect: { 776 // indirect arguments are always on the stack, which is addr space #0. 777 llvm::Type *LTy = ConvertTypeForMem(it->type); 778 argTypes.push_back(LTy->getPointerTo()); 779 break; 780 } 781 782 case ABIArgInfo::Extend: 783 case ABIArgInfo::Direct: { 784 // Insert a padding type to ensure proper alignment. 785 if (llvm::Type *PaddingType = argAI.getPaddingType()) 786 argTypes.push_back(PaddingType); 787 // If the coerce-to type is a first class aggregate, flatten it. Either 788 // way is semantically identical, but fast-isel and the optimizer 789 // generally likes scalar values better than FCAs. 
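      // (For example, a { i64, i64 } coerce-to type contributes two i64
      // parameters to the LLVM function signature.)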
790 llvm::Type *argType = argAI.getCoerceToType(); 791 if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) { 792 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) 793 argTypes.push_back(st->getElementType(i)); 794 } else { 795 argTypes.push_back(argType); 796 } 797 break; 798 } 799 800 case ABIArgInfo::Expand: 801 GetExpandedTypes(it->type, argTypes); 802 break; 803 } 804 } 805 806 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; 807 assert(Erased && "Not in set?"); 808 809 return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic()); 810 } 811 812 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { 813 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 814 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 815 816 if (!isFuncTypeConvertible(FPT)) 817 return llvm::StructType::get(getLLVMContext()); 818 819 const CGFunctionInfo *Info; 820 if (isa<CXXDestructorDecl>(MD)) 821 Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType()); 822 else 823 Info = &arrangeCXXMethodDeclaration(MD); 824 return GetFunctionType(*Info); 825 } 826 827 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, 828 const Decl *TargetDecl, 829 AttributeListType &PAL, 830 unsigned &CallingConv) { 831 llvm::Attributes FuncAttrs; 832 llvm::Attributes RetAttrs; 833 834 CallingConv = FI.getEffectiveCallingConvention(); 835 836 if (FI.isNoReturn()) 837 FuncAttrs |= llvm::Attribute::NoReturn; 838 839 // FIXME: handle sseregparm someday... 840 if (TargetDecl) { 841 if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) 842 FuncAttrs |= llvm::Attribute::ReturnsTwice; 843 if (TargetDecl->hasAttr<NoThrowAttr>()) 844 FuncAttrs |= llvm::Attribute::NoUnwind; 845 else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { 846 const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>(); 847 if (FPT && FPT->isNothrow(getContext())) 848 FuncAttrs |= llvm::Attribute::NoUnwind; 849 } 850 851 if (TargetDecl->hasAttr<NoReturnAttr>()) 852 FuncAttrs |= llvm::Attribute::NoReturn; 853 854 if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) 855 FuncAttrs |= llvm::Attribute::ReturnsTwice; 856 857 // 'const' and 'pure' attribute functions are also nounwind. 
858 if (TargetDecl->hasAttr<ConstAttr>()) { 859 FuncAttrs |= llvm::Attribute::ReadNone; 860 FuncAttrs |= llvm::Attribute::NoUnwind; 861 } else if (TargetDecl->hasAttr<PureAttr>()) { 862 FuncAttrs |= llvm::Attribute::ReadOnly; 863 FuncAttrs |= llvm::Attribute::NoUnwind; 864 } 865 if (TargetDecl->hasAttr<MallocAttr>()) 866 RetAttrs |= llvm::Attribute::NoAlias; 867 } 868 869 if (CodeGenOpts.OptimizeSize) 870 FuncAttrs |= llvm::Attribute::OptimizeForSize; 871 if (CodeGenOpts.DisableRedZone) 872 FuncAttrs |= llvm::Attribute::NoRedZone; 873 if (CodeGenOpts.NoImplicitFloat) 874 FuncAttrs |= llvm::Attribute::NoImplicitFloat; 875 876 QualType RetTy = FI.getReturnType(); 877 unsigned Index = 1; 878 const ABIArgInfo &RetAI = FI.getReturnInfo(); 879 switch (RetAI.getKind()) { 880 case ABIArgInfo::Extend: 881 if (RetTy->hasSignedIntegerRepresentation()) 882 RetAttrs |= llvm::Attribute::SExt; 883 else if (RetTy->hasUnsignedIntegerRepresentation()) 884 RetAttrs |= llvm::Attribute::ZExt; 885 break; 886 case ABIArgInfo::Direct: 887 case ABIArgInfo::Ignore: 888 break; 889 890 case ABIArgInfo::Indirect: 891 PAL.push_back(llvm::AttributeWithIndex::get(Index, 892 llvm::Attribute::StructRet)); 893 ++Index; 894 // sret disables readnone and readonly 895 FuncAttrs &= ~(llvm::Attribute::ReadOnly | 896 llvm::Attribute::ReadNone); 897 break; 898 899 case ABIArgInfo::Expand: 900 llvm_unreachable("Invalid ABI kind for return argument"); 901 } 902 903 if (RetAttrs) 904 PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs)); 905 906 // FIXME: RegParm should be reduced in case of global register variable. 907 signed RegParm; 908 if (FI.getHasRegParm()) 909 RegParm = FI.getRegParm(); 910 else 911 RegParm = CodeGenOpts.NumRegisterParameters; 912 913 unsigned PointerWidth = getContext().getTargetInfo().getPointerWidth(0); 914 for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 915 ie = FI.arg_end(); it != ie; ++it) { 916 QualType ParamType = it->type; 917 const ABIArgInfo &AI = it->info; 918 llvm::Attributes Attrs; 919 920 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 921 // have the corresponding parameter variable. It doesn't make 922 // sense to do it here because parameters are so messed up. 923 switch (AI.getKind()) { 924 case ABIArgInfo::Extend: 925 if (ParamType->isSignedIntegerOrEnumerationType()) 926 Attrs |= llvm::Attribute::SExt; 927 else if (ParamType->isUnsignedIntegerOrEnumerationType()) 928 Attrs |= llvm::Attribute::ZExt; 929 // FALL THROUGH 930 case ABIArgInfo::Direct: 931 if (RegParm > 0 && 932 (ParamType->isIntegerType() || ParamType->isPointerType() || 933 ParamType->isReferenceType())) { 934 RegParm -= 935 (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth; 936 if (RegParm >= 0) 937 Attrs |= llvm::Attribute::InReg; 938 } 939 // FIXME: handle sseregparm someday... 940 941 // Increment Index if there is padding. 942 Index += (AI.getPaddingType() != 0); 943 944 if (llvm::StructType *STy = 945 dyn_cast<llvm::StructType>(AI.getCoerceToType())) 946 Index += STy->getNumElements()-1; // 1 will be added below. 947 break; 948 949 case ABIArgInfo::Indirect: 950 if (AI.getIndirectByVal()) 951 Attrs |= llvm::Attribute::ByVal; 952 953 Attrs |= 954 llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign()); 955 // byval disables readnone and readonly. 956 FuncAttrs &= ~(llvm::Attribute::ReadOnly | 957 llvm::Attribute::ReadNone); 958 break; 959 960 case ABIArgInfo::Ignore: 961 // Skip increment, no matching LLVM parameter. 
962 continue; 963 964 case ABIArgInfo::Expand: { 965 SmallVector<llvm::Type*, 8> types; 966 // FIXME: This is rather inefficient. Do we ever actually need to do 967 // anything here? The result should be just reconstructed on the other 968 // side, so extension should be a non-issue. 969 getTypes().GetExpandedTypes(ParamType, types); 970 Index += types.size(); 971 continue; 972 } 973 } 974 975 if (Attrs) 976 PAL.push_back(llvm::AttributeWithIndex::get(Index, Attrs)); 977 ++Index; 978 } 979 if (FuncAttrs) 980 PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs)); 981 } 982 983 /// An argument came in as a promoted argument; demote it back to its 984 /// declared type. 985 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 986 const VarDecl *var, 987 llvm::Value *value) { 988 llvm::Type *varType = CGF.ConvertType(var->getType()); 989 990 // This can happen with promotions that actually don't change the 991 // underlying type, like the enum promotions. 992 if (value->getType() == varType) return value; 993 994 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 995 && "unexpected promotion type"); 996 997 if (isa<llvm::IntegerType>(varType)) 998 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 999 1000 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 1001 } 1002 1003 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, 1004 llvm::Function *Fn, 1005 const FunctionArgList &Args) { 1006 // If this is an implicit-return-zero function, go ahead and 1007 // initialize the return value. TODO: it might be nice to have 1008 // a more general mechanism for this that didn't require synthesized 1009 // return statements. 1010 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) { 1011 if (FD->hasImplicitReturnZero()) { 1012 QualType RetTy = FD->getResultType().getUnqualifiedType(); 1013 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy); 1014 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy); 1015 Builder.CreateStore(Zero, ReturnValue); 1016 } 1017 } 1018 1019 // FIXME: We no longer need the types from FunctionArgList; lift up and 1020 // simplify. 1021 1022 // Emit allocs for param decls. Give the LLVM Argument nodes names. 1023 llvm::Function::arg_iterator AI = Fn->arg_begin(); 1024 1025 // Name the struct return argument. 1026 if (CGM.ReturnTypeUsesSRet(FI)) { 1027 AI->setName("agg.result"); 1028 AI->addAttr(llvm::Attribute::NoAlias); 1029 ++AI; 1030 } 1031 1032 assert(FI.arg_size() == Args.size() && 1033 "Mismatch between function signature & arguments."); 1034 unsigned ArgNo = 1; 1035 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); 1036 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 1037 i != e; ++i, ++info_it, ++ArgNo) { 1038 const VarDecl *Arg = *i; 1039 QualType Ty = info_it->type; 1040 const ABIArgInfo &ArgI = info_it->info; 1041 1042 bool isPromoted = 1043 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted(); 1044 1045 switch (ArgI.getKind()) { 1046 case ABIArgInfo::Indirect: { 1047 llvm::Value *V = AI; 1048 1049 if (hasAggregateLLVMType(Ty)) { 1050 // Aggregates and complex variables are accessed by reference. All we 1051 // need to do is realign the value, if requested 1052 if (ArgI.getIndirectRealign()) { 1053 llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce"); 1054 1055 // Copy from the incoming argument pointer to the temporary with the 1056 // appropriate alignment. 
1057 // 1058 // FIXME: We should have a common utility for generating an aggregate 1059 // copy. 1060 llvm::Type *I8PtrTy = Builder.getInt8PtrTy(); 1061 CharUnits Size = getContext().getTypeSizeInChars(Ty); 1062 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy); 1063 llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy); 1064 Builder.CreateMemCpy(Dst, 1065 Src, 1066 llvm::ConstantInt::get(IntPtrTy, 1067 Size.getQuantity()), 1068 ArgI.getIndirectAlign(), 1069 false); 1070 V = AlignedTemp; 1071 } 1072 } else { 1073 // Load scalar value from indirect argument. 1074 CharUnits Alignment = getContext().getTypeAlignInChars(Ty); 1075 V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty); 1076 1077 if (isPromoted) 1078 V = emitArgumentDemotion(*this, Arg, V); 1079 } 1080 EmitParmDecl(*Arg, V, ArgNo); 1081 break; 1082 } 1083 1084 case ABIArgInfo::Extend: 1085 case ABIArgInfo::Direct: { 1086 // Skip the dummy padding argument. 1087 if (ArgI.getPaddingType()) 1088 ++AI; 1089 1090 // If we have the trivial case, handle it with no muss and fuss. 1091 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 1092 ArgI.getCoerceToType() == ConvertType(Ty) && 1093 ArgI.getDirectOffset() == 0) { 1094 assert(AI != Fn->arg_end() && "Argument mismatch!"); 1095 llvm::Value *V = AI; 1096 1097 if (Arg->getType().isRestrictQualified()) 1098 AI->addAttr(llvm::Attribute::NoAlias); 1099 1100 // Ensure the argument is the correct type. 1101 if (V->getType() != ArgI.getCoerceToType()) 1102 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 1103 1104 if (isPromoted) 1105 V = emitArgumentDemotion(*this, Arg, V); 1106 1107 EmitParmDecl(*Arg, V, ArgNo); 1108 break; 1109 } 1110 1111 llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName()); 1112 1113 // The alignment we need to use is the max of the requested alignment for 1114 // the argument plus the alignment required by our access code below. 1115 unsigned AlignmentToUse = 1116 CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType()); 1117 AlignmentToUse = std::max(AlignmentToUse, 1118 (unsigned)getContext().getDeclAlign(Arg).getQuantity()); 1119 1120 Alloca->setAlignment(AlignmentToUse); 1121 llvm::Value *V = Alloca; 1122 llvm::Value *Ptr = V; // Pointer to store into. 1123 1124 // If the value is offset in memory, apply the offset now. 1125 if (unsigned Offs = ArgI.getDirectOffset()) { 1126 Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy()); 1127 Ptr = Builder.CreateConstGEP1_32(Ptr, Offs); 1128 Ptr = Builder.CreateBitCast(Ptr, 1129 llvm::PointerType::getUnqual(ArgI.getCoerceToType())); 1130 } 1131 1132 // If the coerce-to type is a first class aggregate, we flatten it and 1133 // pass the elements. Either way is semantically identical, but fast-isel 1134 // and the optimizer generally likes scalar values better than FCAs. 
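      // If the flattened pieces fit in the parameter's memory slot, store each
      // element directly into it; otherwise stage them in a temporary of the
      // coerce-to type and copy only the bytes that belong to the parameter.
      // (For example, on x86-64 a 16-byte struct of integer fields is typically
      // coerced to { i64, i64 } and arrives as two i64 arguments.)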
1135 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 1136 if (STy && STy->getNumElements() > 1) { 1137 uint64_t SrcSize = CGM.getTargetData().getTypeAllocSize(STy); 1138 llvm::Type *DstTy = 1139 cast<llvm::PointerType>(Ptr->getType())->getElementType(); 1140 uint64_t DstSize = CGM.getTargetData().getTypeAllocSize(DstTy); 1141 1142 if (SrcSize <= DstSize) { 1143 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy)); 1144 1145 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1146 assert(AI != Fn->arg_end() && "Argument mismatch!"); 1147 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 1148 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i); 1149 Builder.CreateStore(AI++, EltPtr); 1150 } 1151 } else { 1152 llvm::AllocaInst *TempAlloca = 1153 CreateTempAlloca(ArgI.getCoerceToType(), "coerce"); 1154 TempAlloca->setAlignment(AlignmentToUse); 1155 llvm::Value *TempV = TempAlloca; 1156 1157 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1158 assert(AI != Fn->arg_end() && "Argument mismatch!"); 1159 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 1160 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i); 1161 Builder.CreateStore(AI++, EltPtr); 1162 } 1163 1164 Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse); 1165 } 1166 } else { 1167 // Simple case, just do a coerced store of the argument into the alloca. 1168 assert(AI != Fn->arg_end() && "Argument mismatch!"); 1169 AI->setName(Arg->getName() + ".coerce"); 1170 CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this); 1171 } 1172 1173 1174 // Match to what EmitParmDecl is expecting for this type. 1175 if (!CodeGenFunction::hasAggregateLLVMType(Ty)) { 1176 V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty); 1177 if (isPromoted) 1178 V = emitArgumentDemotion(*this, Arg, V); 1179 } 1180 EmitParmDecl(*Arg, V, ArgNo); 1181 continue; // Skip ++AI increment, already done. 1182 } 1183 1184 case ABIArgInfo::Expand: { 1185 // If this structure was expanded into multiple arguments then 1186 // we need to create a temporary and reconstruct it from the 1187 // arguments. 1188 llvm::AllocaInst *Alloca = CreateMemTemp(Ty); 1189 CharUnits Align = getContext().getDeclAlign(Arg); 1190 Alloca->setAlignment(Align.getQuantity()); 1191 LValue LV = MakeAddrLValue(Alloca, Ty, Align); 1192 llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI); 1193 EmitParmDecl(*Arg, Alloca, ArgNo); 1194 1195 // Name the arguments used in expansion and increment AI. 1196 unsigned Index = 0; 1197 for (; AI != End; ++AI, ++Index) 1198 AI->setName(Arg->getName() + "." + Twine(Index)); 1199 continue; 1200 } 1201 1202 case ABIArgInfo::Ignore: 1203 // Initialize the local variable appropriately. 1204 if (hasAggregateLLVMType(Ty)) 1205 EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo); 1206 else 1207 EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())), 1208 ArgNo); 1209 1210 // Skip increment, no matching LLVM parameter. 1211 continue; 1212 } 1213 1214 ++AI; 1215 } 1216 assert(AI == Fn->arg_end() && "Argument mismatch!"); 1217 } 1218 1219 static void eraseUnusedBitCasts(llvm::Instruction *insn) { 1220 while (insn->use_empty()) { 1221 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn); 1222 if (!bitcast) return; 1223 1224 // This is "safe" because we would have used a ConstantExpr otherwise. 
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // We must be emitting immediately after the result; i.e. it must be the
  // last instruction in the current block.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return 0;
  if (&BB->back() != result) return 0;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*,4> insnsToKill;

  // Look for:
  //   %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return 0;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return 0;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // Look for an inline asm immediately preceding the call and kill it, too.
    llvm::Instruction *prev = call->getPrevNode();
    if (llvm::CallInst *asmCall = dyn_cast_or_null<llvm::CallInst>(prev))
      if (asmCall->getCalledValue()
            == CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker)
        insnsToKill.push_back(prev);
  } else {
    return 0;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity. Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
1312 const ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CGF.CurCodeDecl); 1313 if (!method) return 0; 1314 const VarDecl *self = method->getSelfDecl(); 1315 if (!self->getType().isConstQualified()) return 0; 1316 1317 // Look for a retain call. 1318 llvm::CallInst *retainCall = 1319 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 1320 if (!retainCall || 1321 retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain) 1322 return 0; 1323 1324 // Look for an ordinary load of 'self'. 1325 llvm::Value *retainedValue = retainCall->getArgOperand(0); 1326 llvm::LoadInst *load = 1327 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 1328 if (!load || load->isAtomic() || load->isVolatile() || 1329 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self)) 1330 return 0; 1331 1332 // Okay! Burn it all down. This relies for correctness on the 1333 // assumption that the retain is emitted as part of the return and 1334 // that thereafter everything is used "linearly". 1335 llvm::Type *resultType = result->getType(); 1336 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 1337 assert(retainCall->use_empty()); 1338 retainCall->eraseFromParent(); 1339 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 1340 1341 return CGF.Builder.CreateBitCast(load, resultType); 1342 } 1343 1344 /// Emit an ARC autorelease of the result of a function. 1345 /// 1346 /// \return the value to actually return from the function 1347 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 1348 llvm::Value *result) { 1349 // If we're returning 'self', kill the initial retain. This is a 1350 // heuristic attempt to "encourage correctness" in the really unfortunate 1351 // case where we have a return of self during a dealloc and we desperately 1352 // need to avoid the possible autorelease. 1353 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result)) 1354 return self; 1355 1356 // At -O0, try to emit a fused retain/autorelease. 1357 if (CGF.shouldUseFusedARCCalls()) 1358 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result)) 1359 return fused; 1360 1361 return CGF.EmitARCAutoreleaseReturnValue(result); 1362 } 1363 1364 /// Heuristically search for a dominating store to the return-value slot. 1365 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { 1366 // If there are multiple uses of the return-value slot, just check 1367 // for something immediately preceding the IP. Sometimes this can 1368 // happen with how we generate implicit-returns; it can also happen 1369 // with noreturn cleanups. 1370 if (!CGF.ReturnValue->hasOneUse()) { 1371 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); 1372 if (IP->empty()) return 0; 1373 llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back()); 1374 if (!store) return 0; 1375 if (store->getPointerOperand() != CGF.ReturnValue) return 0; 1376 assert(!store->isAtomic() && !store->isVolatile()); // see below 1377 return store; 1378 } 1379 1380 llvm::StoreInst *store = 1381 dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back()); 1382 if (!store) return 0; 1383 1384 // These aren't actually possible for non-coerced returns, and we 1385 // only care about non-coerced returns on this code path. 1386 assert(!store->isAtomic() && !store->isVolatile()); 1387 1388 // Now do a first-and-dirty dominance check: just walk up the 1389 // single-predecessors chain from the current insertion point. 
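  // This walk is conservative: we give up as soon as a block has more than one
  // predecessor, even if the store might still dominate the insertion point
  // through another path.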
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return 0;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, Alignment, RetTy);
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp will always have
      // pointer-to-return-type type; just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
        // Get the stored value and nuke the now-dead store.
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      llvm::Value *V = ReturnValue;
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
      assert(getLangOptions().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RetTy->isObjCRetainableType());
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ?
Builder.CreateRet(RV) : Builder.CreateRetVoid(); 1486 if (!RetDbgLoc.isUnknown()) 1487 Ret->setDebugLoc(RetDbgLoc); 1488 } 1489 1490 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 1491 const VarDecl *param) { 1492 // StartFunction converted the ABI-lowered parameter(s) into a 1493 // local alloca. We need to turn that into an r-value suitable 1494 // for EmitCall. 1495 llvm::Value *local = GetAddrOfLocalVar(param); 1496 1497 QualType type = param->getType(); 1498 1499 // For the most part, we just need to load the alloca, except: 1500 // 1) aggregate r-values are actually pointers to temporaries, and 1501 // 2) references to aggregates are pointers directly to the aggregate. 1502 // I don't know why references to non-aggregates are different here. 1503 if (const ReferenceType *ref = type->getAs<ReferenceType>()) { 1504 if (hasAggregateLLVMType(ref->getPointeeType())) 1505 return args.add(RValue::getAggregate(local), type); 1506 1507 // Locals which are references to scalars are represented 1508 // with allocas holding the pointer. 1509 return args.add(RValue::get(Builder.CreateLoad(local)), type); 1510 } 1511 1512 if (type->isAnyComplexType()) { 1513 ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false); 1514 return args.add(RValue::getComplex(complex), type); 1515 } 1516 1517 if (hasAggregateLLVMType(type)) 1518 return args.add(RValue::getAggregate(local), type); 1519 1520 unsigned alignment = getContext().getDeclAlign(param).getQuantity(); 1521 llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type); 1522 return args.add(RValue::get(value), type); 1523 } 1524 1525 static bool isProvablyNull(llvm::Value *addr) { 1526 return isa<llvm::ConstantPointerNull>(addr); 1527 } 1528 1529 static bool isProvablyNonNull(llvm::Value *addr) { 1530 return isa<llvm::AllocaInst>(addr); 1531 } 1532 1533 /// Emit the actual writing-back of a writeback. 1534 static void emitWriteback(CodeGenFunction &CGF, 1535 const CallArgList::Writeback &writeback) { 1536 llvm::Value *srcAddr = writeback.Address; 1537 assert(!isProvablyNull(srcAddr) && 1538 "shouldn't have writeback for provably null argument"); 1539 1540 llvm::BasicBlock *contBB = 0; 1541 1542 // If the argument wasn't provably non-null, we need to null check 1543 // before doing the store. 1544 bool provablyNonNull = isProvablyNonNull(srcAddr); 1545 if (!provablyNonNull) { 1546 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 1547 contBB = CGF.createBasicBlock("icr.done"); 1548 1549 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull"); 1550 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 1551 CGF.EmitBlock(writebackBB); 1552 } 1553 1554 // Load the value to writeback. 1555 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 1556 1557 // Cast it back, in case we're writing an id to a Foo* or something. 1558 value = CGF.Builder.CreateBitCast(value, 1559 cast<llvm::PointerType>(srcAddr->getType())->getElementType(), 1560 "icr.writeback-cast"); 1561 1562 // Perform the writeback. 1563 QualType srcAddrType = writeback.AddressType; 1564 CGF.EmitStoreThroughLValue(RValue::get(value), 1565 CGF.MakeAddrLValue(srcAddr, srcAddrType)); 1566 1567 // Jump to the continuation block. 
static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (CallArgList::writeback_iterator
         i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
    emitWriteback(CGF, *i);
}

/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of a temporary copy of the argument object;
/// after the call, the value in the temporary is written back to the
/// original l-value.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.
  llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  QualType srcAddrType =
    CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = 0;

  // If the address is *not* known to be non-null, we have to select at run
  // time between passing the temporary and passing null.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                   llvm::ConstantPointerNull::get(destType),
                                             temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
    }
  }

  // Perform a copy if necessary.
  if (shouldCopy) {
    LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull)
    CGF.EmitBlock(contBB);

  args.addWriteback(srcAddr, srcAddrType, temp);
  args.add(RValue::get(finalArgument), CRE->getType());
}

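/// Emit a single call argument and add it to the given argument list,
/// handling ARC writeback arguments, reference bindings, complex values,
/// and aggregate r-values in addition to ordinary scalars.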
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getContext().getLangOptions().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
                    type);
  }

  if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
      isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();

  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOptions().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return Inst;
}

llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
}

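/// Assert that the IR value being passed as argument number ArgNo is
/// compatible with the callee's IR function type (or that the callee is
/// variadic), then advance ArgNo.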
static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
                            llvm::FunctionType *FTy) {
  if (ArgNo < FTy->getNumParams())
    assert(Elt->getType() == FTy->getParamType(ArgNo));
  else
    assert(FTy->isVarArg());
  ++ArgNo;
}

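/// Expand an r-value of the given type into a sequence of scalar IR
/// arguments, recursing through constant arrays, structures, and complex
/// types; used to lower ABIArgInfo::Expand arguments.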
void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                       SmallVector<llvm::Value*,16> &Args,
                                       llvm::FunctionType *IRFuncTy) {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    llvm::Value *Addr = RV.getAggregateAddr();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      RValue EltRV;
      if (EltTy->isAnyComplexType())
        // FIXME: Volatile?
        EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
      else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
        EltRV = LV.asAggregateRValue();
      else
        EltRV = EmitLoadOfLValue(LV);
      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
    }
  } else if (const RecordType *RT = Ty->getAsStructureType()) {
    RecordDecl *RD = RT->getDecl();
    assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
    llvm::Value *Addr = RV.getAggregateAddr();
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
      FieldDecl *FD = *i;
      QualType FT = FD->getType();

      // FIXME: What are the right qualifiers here?
      LValue LV = EmitLValueForField(Addr, FD, 0);
      RValue FldRV;
      if (FT->isAnyComplexType())
        // FIXME: Volatile?
        FldRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
      else if (CodeGenFunction::hasAggregateLLVMType(FT))
        FldRV = LV.asAggregateRValue();
      else
        FldRV = EmitLoadOfLValue(LV);
      ExpandTypeToArgs(FT, FldRV, Args, IRFuncTy);
    }
  } else if (Ty->isAnyComplexType()) {
    ComplexPairTy CV = RV.getComplexVal();
    Args.push_back(CV.first);
    Args.push_back(CV.second);
  } else {
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (Args.size() < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(Args.size()))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));

    Args.push_back(V);
  }
}

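/// Emit the actual call or invoke for the given callee and argument list,
/// lowering the arguments and the return value according to the ABI
/// information in CallInfo.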
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // IRArgNo - Keep track of the argument number in the callee we're looking at.
  unsigned IRArgNo = 0;
  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
                  cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
    checkArgMatches(Value, IRArgNo, IRFuncTy);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    unsigned TypeAlign =
      getContext().getTypeAlignInChars(I->Ty).getQuantity();
    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        Args.push_back(AI);

        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
                            TypeAlign, I->Ty);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);

        // Validate argument match.
        checkArgMatches(AI, IRArgNo, IRFuncTy);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in two cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::TargetData *TD = &CGM.getTargetData();
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          Args.push_back(AI);
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());

          // Validate argument match.
          checkArgMatches(AI, IRArgNo, IRFuncTy);
        } else {
          // Skip the extra memcpy call.
          Args.push_back(Addr);

          // Validate argument match.
          checkArgMatches(Addr, IRArgNo, IRFuncTy);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Insert a padding argument to ensure proper alignment.
      if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
        Args.push_back(llvm::UndefValue::get(PaddingType));
        ++IRArgNo;
      }

      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
        if (IRArgNo < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(IRArgNo))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
        Args.push_back(V);

        checkArgMatches(V, IRArgNo, IRFuncTy);
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                                       llvm::PointerType::getUnqual(STy));
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);

          // Validate argument match.
          checkArgMatches(LI, IRArgNo, IRFuncTy);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));

        // Validate argument match.
        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
      IRArgNo = Args.size();
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
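  // Note that the cast is stripped below only when the return type and all
  // fixed parameter types match exactly and the cast at most makes a
  // non-variadic callee look variadic, so the call's behavior is unchanged.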
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::BasicBlock *InvokeDest = 0;
  if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOptions().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expression emitters
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
  }

  case ABIArgInfo::Ignore:
    // If the return value is ignored by the ABI, still construct an
    // appropriate (undef) return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    llvm::Type *RetIRTy = ConvertType(RetTy);
    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
      if (RetTy->isAnyComplexType()) {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
        return RValue::getAggregate(DestPtr);
      }

      // If the call result doesn't match the expected IR return type, perform
      // a bitcast to coerce it.  This can happen due to trivial type
      // mismatches.
      llvm::Value *V = CI;
      if (V->getType() != RetIRTy)
        V = Builder.CreateBitCast(V, RetIRTy);
      return RValue::get(V);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm_unreachable("Unhandled ABIArgInfo::Kind");
}

/* VarArg handling */

/// Emit a va_arg expression for the given type by delegating to the
/// target-specific ABIInfo implementation.
llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}