//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CGCXXABI.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/DataLayout.h"
#include "llvm/InlineAsm.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  // TODO: Add support for CC_X86Pascal to LLVM.
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                                 ArrayRef<CanQualType>(),
                                 FTNP->getExtInfo(),
                                 RequiredArgs(0));
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.  Use the
/// given ExtInfo instead of the ExtInfo from the function type.
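/// The parameter types already in 'prefix' (e.g. an implicit 'this')
/// stay in place; the formal parameters from FTP are appended after
/// them.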
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        FunctionType::ExtInfo extInfo) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    prefix.push_back(FTP->getArgType(i));
  CanQualType resultType = FTP->getResultType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
}

/// Arrange the argument and result information for a free function (i.e.
/// not a C++ or ObjC instance method) of the given type.
static const CGFunctionInfo &
arrangeFreeFunctionType(CodeGenTypes &CGT,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
}

/// Given the formal ext-info of a C++ instance method, adjust it
/// according to the C++ ABI in effect.
static void adjustCXXMethodInfo(CodeGenTypes &CGT,
                                FunctionType::ExtInfo &extInfo,
                                bool isVariadic) {
  if (extInfo.getCC() == CC_Default) {
    CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
    extInfo = extInfo.withCallingConv(CC);
  }
}

/// Arrange the argument and result information for a C++ instance method
/// of the given type, on top of any implicit parameters already stored
/// in 'prefix'.
static const CGFunctionInfo &
arrangeCXXMethodType(CodeGenTypes &CGT,
                     SmallVectorImpl<CanQualType> &prefix,
                     CanQual<FunctionProtoType> FTP) {
  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeFreeFunctionType(*this, argTypes, FTP);
}

/// Returns the calling convention implied by the given declaration's
/// attributes.
static CallingConv getCallingConventionForDecl(const Decl *D) {
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<PnaclCallAttr>())
    return CC_PnaclCall;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(GetThisType(Context, RD));

  return ::arrangeCXXMethodType(*this, argTypes,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

/// Arrange the argument and result information for a declaration
/// or definition of the given constructor variant.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
                                               CXXCtorType ctorKind) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    argTypes.push_back(FTP->getArgType(i));

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
}

/// Arrange the argument and result information for a declaration,
/// definition, or call to the given destructor variant.  It so
/// happens that all three cases produce the same information.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
                                   CXXDtorType dtorKind) {
  SmallVector<CanQualType, 2> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
  assert(!FTP->isVariadic() && "dtor with variadic signature");

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, false);
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
                                 RequiredArgs::All);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
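  // (Contrast arrangeFreeFunctionType above, which treats a value of
  // unprototyped function type as variadic.)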
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(noProto->getResultType(),
                                   ArrayRef<CanQualType>(),
                                   noProto->getExtInfo(),
                                   RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }

  FunctionType::ExtInfo einfo;
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
                                 einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXDestructor(DD, GD.getDtorType());

  return arrangeFunctionDeclaration(FD);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
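/// (In particular, whether an unprototyped call is treated as variadic
/// is left to the target; see the isNoProtoCallVariadic check below.)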
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType) {
  RequiredArgs required = RequiredArgs::All;
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumArgs());
  } else if (CGM.getTargetCodeGenInfo()
               .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(0);
  }

  return arrangeFreeFunctionCall(fnType->getResultType(), args,
                                 fnType->getExtInfo(), required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  adjustCXXMethodInfo(*this, info, FPT->isVariadic());
  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
                                 argTypes, info, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
                                         const FunctionArgList &args,
                                         const FunctionType::ExtInfo &info,
                                         bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));

  RequiredArgs required =
    (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(getContext().VoidTy, ArrayRef<CanQualType>(),
                                 FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
#ifndef NDEBUG
  for (ArrayRef<CanQualType>::const_iterator
         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Look up or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);

  void *insertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info.  We co-allocate the ArgInfos.
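  // (CGFunctionInfo::create below allocates the ArgInfo array in a
  // buffer that trails the CGFunctionInfo object itself.)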
  FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertType(I->type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/

void CodeGenTypes::GetExpandedTypes(QualType type,
                     SmallVectorImpl<llvm::Type*> &expandedTypes) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
    uint64_t NumElts = AT->getSize().getZExtValue();
    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
      GetExpandedTypes(AT->getElementType(), expandedTypes);
  } else if (const RecordType *RT = type->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening.  Thus we have to use the "largest" field.
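      // Scan for the field with the largest in-memory size; on a tie,
      // the first such field wins.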
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        GetExpandedTypes(LargestFD->getType(), expandedTypes);
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        assert(!i->isBitField() &&
               "Cannot expand structure with bit-field members.");
        GetExpandedTypes(i->getType(), expandedTypes);
      }
    }
  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CT->getElementType());
    expandedTypes.push_back(EltTy);
    expandedTypes.push_back(EltTy);
  } else
    expandedTypes.push_back(ConvertType(type));
}

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      AI = ExpandTypeFromArgs(EltTy, LV, AI);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening.  Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, LargestFD);
        AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        FieldDecl *FD = *i;
        QualType FT = FD->getType();

        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, FD);
        AI = ExpandTypeFromArgs(FT, SubLV, AI);
      }
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType EltTy = CT->getElementType();
    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  } else {
    EmitStoreThroughLValue(RValue::get(AI), LV);
    ++AI;
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of, try to gep into the struct to get
/// at its inner goodness.  Dive as deep as possible without entering an
/// element with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specified Ty where
/// both are either integers or pointers.  This does a truncation of the value
/// if it is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer, avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
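/// (Conversely, if the source is larger, only the leading bytes that
/// fit in \arg Ty are actually loaded; see the SrcSize >= DstSize case
/// below.)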
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits.  However, this can happen in cases where the structure has
    // additional padding, for example due to a user-specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory.  This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
  // FIXME: Use better alignment.
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           1, false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
    if (LowAlignment)
      SI->setAlignment(1);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory.  This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits.  However, this can happen in cases where the structure has
    // additional padding, for example due to a user-specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
    llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
    llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
    // FIXME: Use better alignment.
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             1, false);
  }
}

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getContext().getTargetInfo().useObjCFPRetForRealType(
        TargetInfo::Float);
    case BuiltinType::Double:
      return getContext().getTargetInfo().useObjCFPRetForRealType(
        TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getContext().getTargetInfo().useObjCFPRetForRealType(
        TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  SmallVector<llvm::Type*, 8> argTypes;
  llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    // Insert a padding type to ensure proper alignment.
    if (llvm::Type *PaddingType = argAI.getPaddingType())
      argTypes.push_back(PaddingType);

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
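      // (An FCA is an LLVM first-class aggregate: a struct or array value
      // passed around directly as an SSA value.)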
      llvm::Type *argType = argAI.getCoerceToType();
      if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attributes::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attributes::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
    else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
    }

    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attributes::NoReturn);

    // 'const' and 'pure' attribute functions are also nounwind.
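    // (They have no observable side effects, so they can be assumed
    // not to unwind.)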
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attributes::ReadNone);
      FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attributes::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs.addAttribute(llvm::Attributes::NoAlias);
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attributes::OptimizeForSize);
  if (CodeGenOpts.OptimizeSize == 2)
    FuncAttrs.addAttribute(llvm::Attributes::MinSize);
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attributes::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attributes::NoImplicitFloat);

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attributes::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attributes::ZExt);
    break;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect: {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attributes::StructRet);
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attributes::InReg);
    PAL.push_back(llvm::
                  AttributeWithIndex::get(Index,
                                          llvm::Attributes::get(getLLVMContext(),
                                                                SRETAttrs)));

    ++Index;
    // sret disables readnone and readonly.
    FuncAttrs.removeAttribute(llvm::Attributes::ReadOnly)
      .removeAttribute(llvm::Attributes::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (RetAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeWithIndex::get(llvm::AttrListPtr::ReturnIndex,
                                          llvm::Attributes::get(getLLVMContext(),
                                                                RetAttrs)));

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    llvm::AttrBuilder Attrs;

    if (AI.getPaddingType()) {
      if (AI.getPaddingInReg()) {
        llvm::AttrBuilder PadAttrs;
        PadAttrs.addAttribute(llvm::Attributes::InReg);

        llvm::Attributes A = llvm::Attributes::get(getLLVMContext(), PadAttrs);
        PAL.push_back(llvm::AttributeWithIndex::get(Index, A));
      }
      // Increment Index if there is padding.
      ++Index;
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attributes::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attributes::ZExt);
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attributes::InReg);

      // FIXME: handle sseregparm someday...

      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
        unsigned Extra = STy->getNumElements() - 1; // 1 will be added below.
        if (Attrs.hasAttributes())
          for (unsigned I = 0; I < Extra; ++I)
            PAL.push_back(llvm::AttributeWithIndex::get(Index + I,
                                          llvm::Attributes::get(getLLVMContext(),
                                                                Attrs)));
        Index += Extra;
      }
      break;

    case ABIArgInfo::Indirect:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attributes::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attributes::ByVal);

      Attrs.addAlignmentAttr(AI.getIndirectAlign());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attributes::ReadOnly)
        .removeAttribute(llvm::Attributes::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient.  Do we ever actually need to do
      // anything here?  The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (Attrs.hasAttributes())
      PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                          llvm::Attributes::get(getLLVMContext(),
                                                                Attrs)));
    ++Index;
  }
  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeWithIndex::get(llvm::AttrListPtr::FunctionIndex,
                                          llvm::Attributes::get(getLLVMContext(),
                                                                FuncAttrs)));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      llvm::Type *LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant *Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
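  // (When the return is indirect, the sret pointer is always the first
  // LLVM argument.)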
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    AI->addAttr(llvm::Attributes::get(getLLVMContext(),
                                      llvm::Attributes::NoAlias));
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    // Skip the dummy padding argument.
    if (ArgI.getPaddingType())
      ++AI;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (hasAggregateLLVMType(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {

      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attributes::get(getLLVMContext(),
                                            llvm::Attributes::NoAlias));

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type.  Since we are codegening the callee
        // in here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        EmitParmDecl(*Arg, V, ArgNo);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());

      // The alignment we need to use is the max of the requested alignment
      // for the argument and the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                     (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                        llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements.  Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (STy && STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy =
          cast<llvm::PointerType>(Ptr->getType())->getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        if (SrcSize <= DstSize) {
          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }
        } else {
          llvm::AllocaInst *TempAlloca =
            CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
          TempAlloca->setAlignment(AlignmentToUse);
          llvm::Value *TempV = TempAlloca;

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }

          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
      CharUnits Align = getContext().getDeclAlign(Arg);
      Alloca->setAlignment(Align.getQuantity());
      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
      llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
      EmitParmDecl(*Arg, Alloca, ArgNo);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty))
        EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
      else
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
                     ArgNo);

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // The result must be the last instruction in the current block.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return 0;
  if (&BB->back() != result) return 0;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*, 4> insnsToKill;

  // Look for:
  //   %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return 0;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return 0;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call.  If we can't find it, we can't do this
    // optimization.  But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
    if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
               CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
      insnsToKill.push_back(prev);
    }
  } else {
    return 0;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method =
    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return 0;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return 0;

  // Look for a retain call.
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
    return 0;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
    return 0;

  // Okay!  Burn it all down.  This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain.  This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}

/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  Sometimes this can
  // Sometimes this can happen with how we generate implicit returns;
  // it can also happen with noreturn cleanups.
  if (!CGF.ReturnValue->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return 0;
    llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
    if (!store) return 0;
    if (store->getPointerOperand() != CGF.ReturnValue) return 0;
    assert(!store->isAtomic() && !store->isVolatile()); // see below
    return store;
  }

  llvm::StoreInst *store =
    dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
  if (!store) return 0;

  // These aren't actually possible for non-coerced returns, and we
  // only care about non-coerced returns on this code path.
  assert(!store->isAtomic() && !store->isVolatile());

  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessors chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return 0;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, Alignment, RetTy);
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp will always have
      // pointer-to-return-type type; just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
        // Get the stored value and nuke the now-dead store.
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      llvm::Value *V = ReturnValue;
      // If the value is offset in memory, apply the offset now.
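      // In IR terms, a sketch of what the block below emits (the 4-byte
      // offset, struct type, and i64 coerce-to type are hypothetical):
      //   %0 = bitcast %struct.S* %retval to i8*
      //   %1 = getelementptr i8* %0, i32 4
      //   %2 = bitcast i8* %1 to i64*
      // followed by a coerced load of %2 as the return value.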
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RetTy->isObjCRetainableType());
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  if (type->isAnyComplexType()) {
    ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
    return args.add(RValue::getComplex(complex), type);
  }

  if (hasAggregateLLVMType(type))
    return args.add(RValue::getAggregate(local), type);

  unsigned alignment = getContext().getDeclAlign(param).getQuantity();
  llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
  return args.add(RValue::get(value), type);
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  llvm::Value *srcAddr = writeback.Address;
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = 0;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
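  // A minimal sketch of the control flow emitted in that case (block and
  // value names match the ones created below; the i8** source type is
  // hypothetical):
  //     %icr.isnull = icmp eq i8** %src, null
  //     br i1 %icr.isnull, label %icr.done, label %icr.writeback
  //   icr.writeback:
  //     ; load the temporary and store it back through %src
  //     br label %icr.done
  //   icr.done: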
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to write back.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value,
               cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
                                   "icr.writeback-cast");

  // Perform the writeback.
  QualType srcAddrType = writeback.AddressType;
  CGF.EmitStoreThroughLValue(RValue::get(value),
                             CGF.MakeAddrLValue(srcAddr, srcAddrType));

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (CallArgList::writeback_iterator
         i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
    emitWriteback(CGF, *i);
}

/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of a temporary; after the call, the
/// temporary's value is stored back through the original address.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  QualType srcAddrType =
    CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");
  // Loading an l-value can introduce a cleanup if the l-value is __weak,
  // and that cleanup will be conditional if we can't prove that the l-value
  // isn't null, so we need to register a dominating point so that the cleanups
  // system will make valid IR.
  CodeGenFunction::ConditionalEvaluation condEval(CGF);

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = 0;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                   llvm::ConstantPointerNull::get(destType),
                                             temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
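    // Sketched in IR terms (names as created here; the i8** pointee type
    // is hypothetical), the null-safe argument is a select:
    //   %icr.isnull   = icmp eq i8** %src, null
    //   %icr.argument = select i1 %icr.isnull, i8** null, i8** %icr.temp
    // and when a copy is required, a branch to an "icr.copy" block
    // guards the load from the original l-value.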
    if (shouldCopy) {
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
      condEval.begin(CGF);
    }
  }

  // Perform a copy if necessary.
  if (shouldCopy) {
    LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull) {
    CGF.EmitBlock(contBB);
    condEval.end(CGF);
  }

  args.addWriteback(srcAddr, srcAddrType, temp);
  args.add(RValue::get(finalArgument), CRE->getType());
}

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getLangOpts().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
                    type);
  }

  if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
      isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();

  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
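  // The effect on the emitted IR is a piece of instruction metadata;
  // roughly (callee and metadata index hypothetical):
  //   call void @foo(), !clang.arc.no_objc_arc_exceptions !0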
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return Inst;
}

llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
}

static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
                            llvm::FunctionType *FTy) {
  if (ArgNo < FTy->getNumParams())
    assert(Elt->getType() == FTy->getParamType(ArgNo));
  else
    assert(FTy->isVarArg());
  ++ArgNo;
}

void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                       SmallVector<llvm::Value*,16> &Args,
                                       llvm::FunctionType *IRFuncTy) {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    llvm::Value *Addr = RV.getAggregateAddr();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      RValue EltRV;
      if (EltTy->isAnyComplexType())
        // FIXME: Volatile?
        EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
      else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
        EltRV = LV.asAggregateRValue();
      else
        EltRV = EmitLoadOfLValue(LV);
      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
    LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);

    if (RD->isUnion()) {
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        RValue FldRV = EmitRValueForField(LV, LargestFD);
        ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        FieldDecl *FD = *i;

        RValue FldRV = EmitRValueForField(LV, FD);
        ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
      }
    }
  } else if (Ty->isAnyComplexType()) {
    ComplexPairTy CV = RV.getComplexVal();
    Args.push_back(CV.first);
    Args.push_back(CV.second);
  } else {
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (Args.size() < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(Args.size()))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));

    Args.push_back(V);
  }
}


RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
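
  // In outline, the code below:
  //   1. passes a result slot as the first IR argument for sret returns,
  //   2. lowers each semantic argument to zero or more IR arguments
  //      according to its ABIArgInfo kind,
  //   3. strips a no-op bitcast of the callee when the signatures match,
  //   4. emits a call or invoke with the computed attributes and
  //      calling convention, and
  //   5. emits writebacks and coerces the IR result back to an RValue.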
  SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // IRArgNo - Keep track of the argument number in the callee we're looking at.
  unsigned IRArgNo = 0;
  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
                  cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
    checkArgMatches(Value, IRArgNo, IRFuncTy);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    unsigned TypeAlign =
      getContext().getTypeAlignInChars(I->Ty).getQuantity();

    // Insert a padding argument to ensure proper alignment.
    if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
      Args.push_back(llvm::UndefValue::get(PaddingType));
      ++IRArgNo;
    }

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        Args.push_back(AI);

        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
                            TypeAlign, I->Ty);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);

        // Validate argument match.
        checkArgMatches(AI, IRArgNo, IRFuncTy);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in two cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          Args.push_back(AI);
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());

          // Validate argument match.
          checkArgMatches(AI, IRArgNo, IRFuncTy);
        } else {
          // Skip the extra memcpy call.
          Args.push_back(Addr);

          // Validate argument match.
          checkArgMatches(Addr, IRArgNo, IRFuncTy);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
        if (IRArgNo < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(IRArgNo))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
        Args.push_back(V);

        checkArgMatches(V, IRArgNo, IRFuncTy);
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first-class aggregate, we flatten it and
      // pass the elements.  Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        llvm::Type *SrcTy =
          cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it.  The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          llvm::AllocaInst *TempAlloca
            = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
          SrcPtr = TempAlloca;
        } else {
          SrcPtr = Builder.CreateBitCast(SrcPtr,
                                         llvm::PointerType::getUnqual(STy));
        }

        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);

          // Validate argument match.
          checkArgMatches(LI, IRArgNo, IRFuncTy);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));

        // Validate argument match.
        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
      IRArgNo = Args.size();
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(getLLVMContext(),
                                                   AttributeList);

  llvm::BasicBlock *InvokeDest = 0;
  if (!Attrs.getFnAttributes().hasAttribute(llvm::Attributes::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
  }

  case ABIArgInfo::Ignore:
    // Even though we are ignoring the call's result, make sure to
    // construct an appropriate (undefined) return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    llvm::Type *RetIRTy = ConvertType(RetTy);
    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
      if (RetTy->isAnyComplexType()) {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
        return RValue::getAggregate(DestPtr);
      }

      // If the value doesn't match, perform a bitcast to coerce it.  This
      // can happen due to trivial type mismatches.
      llvm::Value *V = CI;
      if (V->getType() != RetIRTy)
        V = Builder.CreateBitCast(V, RetIRTy);
      return RValue::get(V);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm_unreachable("Unhandled ABIArgInfo::Kind");
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}
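
// For reference: a va_arg expression such as 'va_arg(ap, int)' (example
// only) reaches EmitVAArg with VAListAddr pointing at the va_list and Ty
// the requested type; the target's ABIInfo then decides what IR the
// actual list-walking expands to.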