//===---- CGCall.cpp - Encapsulate calling convention details --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CGCXXABI.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  // TODO: Add support for CC_X86Pascal to llvm.
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP,
                              bool IsRecursive) {
  return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                         llvm::SmallVector<CanQualType, 16>(),
                         FTNP->getExtInfo(), IsRecursive);
}

/// \param ArgTys - contains any initial parameters besides those
/// in the formal type
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
                                  llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                             CanQual<FunctionProtoType> FTP,
                                             bool IsRecursive = false) {
  // FIXME: Kill copy.
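  // Append the prototype's parameter types after any prefix parameters the
  // caller has already pushed (e.g. the 'this' pointer).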
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
  return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo(), IsRecursive);
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP,
                              bool IsRecursive) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  return ::getFunctionInfo(*this, ArgTys, FTP, IsRecursive);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  return CC_C;
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
                                                 const FunctionProtoType *FTP) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, RD));

  return ::getFunctionInfo(*this, ArgTys,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(GetThisType(Context, MD->getParent()));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
                                                    CXXCtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(GetThisType(Context, D->getParent()));
  CanQualType ResTy = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, Type, ResTy, ArgTys);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  // Add the formal parameters.
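  // ('this' and any implicit ABI arguments added by BuildConstructorSignature,
  // e.g. a VTT pointer, are already in ArgTys.)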
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));

  return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
                                                    CXXDtorType Type) {
  llvm::SmallVector<CanQualType, 2> ArgTys;
  ArgTys.push_back(GetThisType(Context, D->getParent()));
  CanQualType ResTy = Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, Type, ResTy, ArgTys);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");

  return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
  assert(isa<FunctionType>(FTy));
  if (isa<FunctionNoProtoType>(FTy))
    return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
  assert(isa<FunctionProtoType>(FTy));
  return getFunctionInfo(FTy.getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
  ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }
  return getFunctionInfo(GetReturnType(MD->getResultType()),
                         ArgTys,
                         FunctionType::ExtInfo(
                             /*NoReturn*/ false,
                             /*RegParm*/ 0,
                             getCallingConventionForDecl(MD)));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return getFunctionInfo(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return getFunctionInfo(DD, GD.getDtorType());

  return getFunctionInfo(FD);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args,
                                           const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->second));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                   const FunctionArgList &Args,
                                           const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->second));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
                            const llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                            const FunctionType::ExtInfo &Info,
                                                    bool IsRecursive) {
#ifndef NDEBUG
  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
         I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());

  // Look up or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, Info, ResTy,
                          ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy,
                          ArgTys.data(), ArgTys.size());
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &RetInfo = FI->getReturnInfo();
  if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
    RetInfo.setCoerceToType(ConvertTypeRecursive(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertTypeRecursive(I->type));

  // If this is a top-level call and ConvertTypeRecursive hit unresolved pointer
  // types, resolve them now. These pointers may point to this function, which
  // we *just* filled in the FunctionInfo for.
  if (!IsRecursive && !PointersToResolve.empty())
    HandleLateResolvedPointers();

  return *FI;
}

CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
                               bool _NoReturn, unsigned _RegParm,
                               CanQualType ResTy,
                               const CanQualType *ArgTys,
                               unsigned NumArgTys)
  : CallingConvention(_CallingConvention),
    EffectiveCallingConvention(_CallingConvention),
    NoReturn(_NoReturn), RegParm(_RegParm)
{
  NumArgs = NumArgTys;

  // FIXME: Co-allocate with the CGFunctionInfo object.
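  // Slot 0 of the trailing array holds the return type; the argument types
  // follow at indices 1..NumArgTys.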
  Args = new ArgInfo[1 + NumArgTys];
  Args[0].type = ResTy;
  for (unsigned i = 0; i != NumArgTys; ++i)
    Args[1 + i].type = ArgTys[i];
}

/***/

void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys,
                                    bool IsRecursive) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT))
      GetExpandedTypes(FT, ArgTys, IsRecursive);
    else
      ArgTys.push_back(ConvertType(FT, IsRecursive));
  }
}

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   const llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  const llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             const llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  const llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
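  // CoerceIntOrPtrToIntOrPtr also handles any ptrtoint/inttoptr round-trips
  // this requires.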
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If the load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Value *Casted =
    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  llvm::StoreInst *Store =
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  // FIXME: Use better alignment / avoid requiring aligned store.
  Store->setAlignment(1);
  return CGF.Builder.CreateLoad(Tmp);
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If the store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getContext().Target.useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getContext().Target.useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getContext().Target.useObjCFPRetForRealType(
        TargetInfo::LongDouble);
    }
  }

  return false;
}

const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = getFunctionInfo(GD);

  // For definition purposes, don't consider a K&R function variadic.
  bool Variadic = false;
  if (const FunctionProtoType *FPT =
        cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
    Variadic = FPT->isVariadic();

  return GetFunctionType(FI, Variadic, false);
}

const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
                              bool IsRecursive) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    ResultType = RetAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    const llvm::Type *STy = ConvertType(RetTy, IsRecursive);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it. Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      const llvm::Type *ArgTy = AI.getCoerceToType();
      if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgTy)) {
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
          ArgTys.push_back(STy->getElementType(i));
      } else {
        ArgTys.push_back(ArgTy);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, ArgTys, IsRecursive);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}

const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!VerifyFuncTypeComplete(FPT)) {
    const CGFunctionInfo *Info;
    if (isa<CXXDestructorDecl>(MD))
      Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
    else
      Info = &getFunctionInfo(MD);
    return GetFunctionType(*Info, FPT->isVariadic(), false);
  }

  return llvm::OpaqueType::get(getLLVMContext());
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs |= llvm::Attribute::NoReturn;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->hasEmptyExceptionSpec())
        FuncAttrs |= llvm::Attribute::NoUnwind;
    }

    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::SExt;
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::ZExt;
    break;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet));
    ++Index;
    // sret disables readnone and readonly.
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: we need to honor command line settings also.
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variable.
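  // RegParm is kept signed so the per-argument accounting below can drive it
  // negative once the available registers are exhausted; only arguments that
  // leave it non-negative are marked 'inreg'.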
  signed RegParm = FI.getRegParm();

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable. It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerType())
        Attributes |= llvm::Attribute::SExt;
      else if (ParamType->isUnsignedIntegerType())
        Attributes |= llvm::Attribute::ZExt;
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...

      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(AI.getCoerceToType()))
        Index += STy->getNumElements() - 1;  // 1 will be added below.
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attributes |= llvm::Attribute::ByVal;

      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys, false);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value. TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      const llvm::Type *LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant *Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocas for param decls. Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
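  // It is not a source-level parameter, so step past it before matching the
  // remaining LLVM arguments to the declared parameters.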
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (hasAggregateLLVMType(Ty)) {
        // Aggregates and complex variables are accessed by reference. All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          const llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          unsigned Size = getContext().getTypeSize(Ty) / 8;
          Builder.CreateMemCpy(Builder.CreateBitCast(AlignedTemp, I8PtrTy),
                               Builder.CreateBitCast(V, I8PtrTy),
                               llvm::ConstantInt::get(IntPtrTy, Size),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
        unsigned Alignment = getContext().getTypeAlignInChars(Ty).getQuantity();
        V = EmitLoadOfScalar(V, false, Alignment, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);

        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
        EmitParmDecl(*Arg, V);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");

      // The alignment we need to use is the max of the requested alignment for
      // the argument plus the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                      (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;  // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                        llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
        Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          assert(AI != Fn->arg_end() && "Argument mismatch!");
          AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
          Builder.CreateStore(AI++, EltPtr);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, MakeAddrLValue(Temp, Ty), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty))
        EmitParmDecl(*Arg, CreateMemTemp(Ty));
      else
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, Alignment, RetTy);
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp will always have
      // pointer-to-return-type type, so just do a load.

      // If the instruction right before the insertion point is a store to the
      // return value, we can elide the load, zap the store, and usually zap
      // the alloca.
      llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
      llvm::StoreInst *SI = 0;
      if (InsertBB->empty() ||
          !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
          SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
        RV = Builder.CreateLoad(ReturnValue);
      } else {
        // Get the stored value and nuke the now-dead store.
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }
      }
    } else {
      llvm::Value *V = ReturnValue;
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                        llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }
    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}

RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca. We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *Local = GetAddrOfLocalVar(Param);

  QualType ArgType = Param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *RefType = ArgType->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(RefType->getPointeeType()))
      return RValue::getAggregate(Local);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return RValue::get(Builder.CreateLoad(Local));
  }

  if (ArgType->isAnyComplexType())
    return RValue::getComplex(LoadComplexFromAddr(Local, /*volatile*/ false));

  if (hasAggregateLLVMType(ArgType))
    return RValue::getAggregate(Local);

  unsigned Alignment = getContext().getDeclAlign(Param).getQuantity();
  return RValue::get(EmitLoadOfScalar(Local, false, Alignment, ArgType));
}

RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  if (ArgType->isReferenceType())
    return EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);

  return EmitAnyExprToTemp(E);
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  llvm::Value * const *ArgBegin,
                                  llvm::Value * const *ArgEnd,
                                  const llvm::Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  if (!InvokeDest)
    return Builder.CreateCall(Callee, ArgBegin, ArgEnd, Name);

  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
  llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
                                                  ArgBegin, ArgEnd, Name);
  EmitBlock(ContBB);
  return Invoke;
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    unsigned Alignment =
      getContext().getTypeAlignInChars(I->second).getQuantity();
    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
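        // Push the temporary first so that Args.back() names it when the
        // value is stored below.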
        Args.push_back(CreateMemTemp(I->second));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
                            Alignment, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;
    }

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        if (RV.isScalar())
          Args.push_back(RV.getScalarVal());
        else
          Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->second, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, Alignment,
                          I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->second, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                     llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                                       llvm::PointerType::getUnqual(STy));
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast. This handles some cases
  // with unprototyped functions.
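  // For example, in C a call to an unprototyped 'void f();' is emitted
  // through a bitcast of f to the function type implied by the call site.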
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size()) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it. This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::BasicBlock *InvokeDest = 0;
  if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data() + Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data() + Args.size());
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
  }

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      if (RetTy->isAnyComplexType()) {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        Builder.CreateStore(CI, DestPtr, DestIsVolatile);
        return RValue::getAggregate(DestPtr);
      }
      return RValue::get(CI);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                        llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}