//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/OpenCL.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
using namespace clang;
using namespace CodeGen;

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
  : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
    Builder(cgm.getModule().getContext()),
    CapturedStmtInfo(0),
    SanitizePerformTypeCheck(CGM.getSanOpts().Null |
                             CGM.getSanOpts().Alignment |
                             CGM.getSanOpts().ObjectSize |
                             CGM.getSanOpts().Vptr),
    SanOpts(&CGM.getSanOpts()),
    AutoreleaseResult(false), BlockInfo(0), BlockPointer(0),
    LambdaThisCaptureField(0), NormalCleanupDest(0), NextCleanupDestIndex(1),
    FirstBlockInfo(0), EHResumeBlock(0), ExceptionSlot(0), EHSelectorSlot(0),
    DebugInfo(0), DisableDebugInfo(false), CalleeWithThisReturn(0),
    DidCallStackSave(false),
    IndirectBranch(0), SwitchInsn(0), CaseRangeBlock(0), UnreachableBlock(0),
    NumReturnExprs(0), NumSimpleReturnExprs(0),
    CXXABIThisDecl(0), CXXABIThisValue(0), CXXThisValue(0),
    CXXDefaultInitExprThis(0),
    CXXStructorImplicitParamDecl(0), CXXStructorImplicitParamValue(0),
    OutermostConditional(0), CurLexicalScope(0), TerminateLandingPad(0),
    TerminateHandler(0), TrapBB(0) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  llvm::FastMathFlags FMF;
  if (CGM.getLangOpts().FastMath)
    FMF.setUnsafeAlgebra();
  if (CGM.getLangOpts().FiniteMathOnly) {
    FMF.setNoNaNs();
    FMF.setNoInfs();
  }
  Builder.SetFastMathFlags(FMF);
}

CodeGenFunction::~CodeGenFunction() {
  // If there are any unclaimed block infos, go ahead and destroy them
  // now.  This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
      llvm_unreachable("undeduced auto type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->use_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Reset insertion point, including debug location, and delete the
      // branch.  This is really subtle and only works because the next change
      // in location will hit the caching in CGDebugInfo::EmitLocation and not
      // override this.
      Builder.SetCurrentDebugLocation(BI->getDebugLoc());
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses.
  // However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs;
  // If the function contains only a simple return statement, the
  // cleanup code may become the first breakpoint in the function. To
  // be safe, set the debug location for it to the location of the
  // return statement. Otherwise point it to end of the function's
  // lexical scope.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters.  Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool EmitRetDbgLoc = true;
  if (EHStack.stable_begin() != PrologueCleanupDepth) {
    PopCleanupBlocks(PrologueCleanupDepth);

    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    EmitRetDbgLoc = false;

    if (CGDebugInfo *DI = getDebugInfo())
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
  }

  // Emit function epilog (to return).
  EmitReturnBlock();

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->EmitFunctionEnd(Builder);
  }

  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = 0;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  llvm::PointerType *PointerTy = Int8PtrTy;
  llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
  llvm::FunctionType *FunctionTy =
    llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
    CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
    llvm::ConstantInt::get(Int32Ty, 0),
    "callsite");

  llvm::Value *args[] = {
    llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
    CallSite
  };

  EmitNounwindRuntimeCall(F, args);
}

void CodeGenFunction::EmitMCountInstrumentation() {
  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);

  llvm::Constant *MCountFn =
    CGM.CreateRuntimeFunction(FTy, getTarget().getMCountName());
  EmitNounwindRuntimeCall(MCountFn);
}

// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
// information in the program executable. The argument information stored
// includes the argument name, its type, the address and access qualifiers used.
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
                                 CodeGenModule &CGM, llvm::LLVMContext &Context,
                                 SmallVector<llvm::Value*, 5> &kernelMDArgs,
                                 CGBuilderTy &Builder, ASTContext &ASTCtx) {
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list in the form of "key" followed by N values, where N
  // is the number of kernel arguments.

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Value*, 8> addressQuals;
  addressQuals.push_back(llvm::MDString::get(Context, "kernel_arg_addr_space"));

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Value*, 8> accessQuals;
  accessQuals.push_back(llvm::MDString::get(Context, "kernel_arg_access_qual"));

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Value*, 8> argTypeNames;
  argTypeNames.push_back(llvm::MDString::get(Context, "kernel_arg_type"));

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Value*, 8> argTypeQuals;
  argTypeQuals.push_back(llvm::MDString::get(Context, "kernel_arg_type_qual"));

  // MDNode for the kernel argument names.
  SmallVector<llvm::Value*, 8> argNames;
  argNames.push_back(llvm::MDString::get(Context, "kernel_arg_name"));

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
    const ParmVarDecl *parm = FD->getParamDecl(i);
    QualType ty = parm->getType();
    std::string typeQuals;

    if (ty->isPointerType()) {
      QualType pointeeTy = ty->getPointeeType();

      // Get address qualifier.
      addressQuals.push_back(Builder.getInt32(ASTCtx.getTargetAddressSpace(
        pointeeTy.getAddressSpace())));

      // Get argument type name.
      std::string typeName = pointeeTy.getUnqualifiedType().getAsString() + "*";

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (pos != std::string::npos)
        typeName.erase(pos+1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      // Get argument type qualifiers:
      if (ty.isRestrictQualified())
        typeQuals = "restrict";
      if (pointeeTy.isConstQualified() ||
          (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
        typeQuals += typeQuals.empty() ? "const" : " const";
      if (pointeeTy.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    } else {
      addressQuals.push_back(Builder.getInt32(0));

      // Get argument type name.
      std::string typeName = ty.getUnqualifiedType().getAsString();

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (pos != std::string::npos)
        typeName.erase(pos+1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      // Get argument type qualifiers:
      if (ty.isConstQualified())
        typeQuals = "const";
      if (ty.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    }

    argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));

    // Get image access qualifier:
    if (ty->isImageType()) {
      if (parm->hasAttr<OpenCLImageAccessAttr>() &&
          parm->getAttr<OpenCLImageAccessAttr>()->getAccess() == CLIA_write_only)
        accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
      else
        accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
    } else
      accessQuals.push_back(llvm::MDString::get(Context, "none"));

    // Get argument name.
    argNames.push_back(llvm::MDString::get(Context, parm->getName()));
  }

  kernelMDArgs.push_back(llvm::MDNode::get(Context, addressQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, accessQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeNames));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames));
}

void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn)
{
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  SmallVector<llvm::Value*, 5> kernelMDArgs;
  kernelMDArgs.push_back(Fn);

  if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs,
                         Builder, getContext());

  if (FD->hasAttr<VecTypeHintAttr>()) {
    VecTypeHintAttr *attr = FD->getAttr<VecTypeHintAttr>();
    QualType hintQTy = attr->getTypeHint();
    const ExtVectorType *hintEltQTy = hintQTy->getAs<ExtVectorType>();
    bool isSignedInteger =
      hintQTy->isSignedIntegerType() ||
      (hintEltQTy && hintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Value *attrMDArgs[] = {
      llvm::MDString::get(Context, "vec_type_hint"),
      llvm::UndefValue::get(CGM.getTypes().ConvertType(attr->getTypeHint())),
      llvm::ConstantInt::get(
          llvm::IntegerType::get(Context, 32),
          llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0)))
    };
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  if (FD->hasAttr<WorkGroupSizeHintAttr>()) {
    WorkGroupSizeHintAttr *attr = FD->getAttr<WorkGroupSizeHintAttr>();
    llvm::Value *attrMDArgs[] = {
      llvm::MDString::get(Context, "work_group_size_hint"),
      Builder.getInt32(attr->getXDim()),
      Builder.getInt32(attr->getYDim()),
      Builder.getInt32(attr->getZDim())
    };
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
    ReqdWorkGroupSizeAttr *attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
    llvm::Value *attrMDArgs[] = {
      llvm::MDString::get(Context, "reqd_work_group_size"),
      Builder.getInt32(attr->getXDim()),
      Builder.getInt32(attr->getYDim()),
      Builder.getInt32(attr->getZDim())
    };
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  llvm::MDNode *kernelMDNode = llvm::MDNode::get(Context, kernelMDArgs);
  llvm::NamedMDNode *OpenCLKernelMetadata =
    CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
  OpenCLKernelMetadata->addOperand(kernelMDNode);
}

void CodeGenFunction::StartFunction(GlobalDecl GD,
                                    QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  CurFuncDecl = (D ? D->getNonClosureContext() : 0);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  if (CGM.getSanitizerBlacklist().isIn(*Fn)) {
    SanOpts = &SanitizerOptions::Disabled;
    SanitizePerformTypeCheck = false;
  }

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.
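  // Note that the loop below walks the entire redeclaration chain: the
  // 'inline' specifier may appear on any declaration of the function, not
  // just the one currently being emitted.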
  if (!CGM.getCodeGenOpts().NoInline)
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
             RE = FD->redecls_end(); RI != RE; ++RI)
        if (RI->isInlineSpecified()) {
          Fn->addFnAttr(llvm::Attribute::InlineHint);
          break;
        }

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    SmallVector<QualType, 16> ArgTypes;
    for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
         i != e; ++i) {
      ArgTypes.push_back((*i)->getType());
    }

    QualType FnType =
      getContext().getFunctionType(RetTy, ArgTypes,
                                   FunctionProtoType::ExtProtoInfo());

    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    EmitMCountInstrumentation();

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If this lambda captures 'this', load it.
        LValue ThisLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
        CXXThisValue = EmitLoadOfLValue(ThisLValue).getScalarVal();
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'?  The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
  assert(FD->getBody());
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(FD->getBody()))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(FD->getBody());
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it.  We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->mayBeOverridden()) return;

  for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
    for (llvm::BasicBlock::iterator
           BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
      if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI)) {
        if (!Call->doesNotThrow())
          return;
      } else if (isa<llvm::ResumeInst>(&*BI)) {
        return;
      }
  F->setDoesNotThrow();
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (!FD->hasAttr<NoDebugAttr>())
    maybeInitializeDebugInfo();

  FunctionArgList Args;
  QualType ResTy = FD->getResultType();

  CurGD = GD;
  if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
    CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
    Args.push_back(FD->getParamDecl(i));

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
  CurEHLocation = BodyRange.getEnd();

  // CalleeWithThisReturn keeps track of the last callee inside this function
  // that returns 'this'.  Before starting the function, we set it to null.
  CalleeWithThisReturn = 0;

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, BodyRange.getBegin());

  // Generate the body of the function.
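  // Constructors, destructors, CUDA kernel stubs, the lambda conversion and
  // static-invoker functions, and defaulted copy-assignment operators each
  // take a dedicated emission path below; anything else falls through to the
  // generic EmitFunctionBody.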
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !CGM.getCodeGenOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
  else if (isa<CXXConversionDecl>(FD) &&
           cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
    // The lambda conversion to block pointer is special; the semantics can't
    // be expressed in the AST, so IRGen needs to special-case it.
    EmitLambdaToBlockPointerBody(Args);
  } else if (isa<CXXMethodDecl>(FD) &&
             cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda "__invoke" function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator()) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else
    EmitFunctionBody(Args);

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior in
  //   a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() &&
      !FD->getResultType()->isVoidType() && Builder.GetInsertBlock()) {
    if (SanOpts->Return)
      EmitCheck(Builder.getFalse(), "missing_return",
                EmitCheckSourceLocation(FD->getLocation()),
                ArrayRef<llvm::Value *>(), CRK_Unrecoverable);
    else if (CGM.getCodeGenOpts().OptimizationLevel == 0)
      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::trap));
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());
  // CalleeWithThisReturn keeps track of the last callee inside this function
  // that returns 'this'.  After finishing the function, we set it to null.
  CalleeWithThisReturn = 0;

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, then the absence of a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) {  ...  foo:  bar(); }  goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (containsBreak(*I))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the boolean result in Result.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::
ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  llvm::APSInt Int;
  if (!Cond->EvaluateAsInt(Int, getContext()))
    return false;  // Not foldable, not integer or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Int;
  return true;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.  Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
///
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
      EmitBlock(LHSTrue);

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
      EmitBlock(LHSFalse);

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot)
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);

    cond.begin(*this);
    EmitBlock(LHSBlock);
    EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
    cond.end(*this);

    return;
  }

  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
                                       bool OmitOnError) {
  CGM.ErrorUnsupported(S, Type, OmitOnError);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType the inner-most element type of the array
/// \param src - a char* pointing to the bit-pattern for a single
///   base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               llvm::Value *dest, llvm::Value *src,
                               llvm::Value *sizeInChars) {
  std::pair<CharUnits,CharUnits> baseSizeAndAlign
    = CGF.getContext().getTypeInfoInChars(baseType);

  CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());

  llvm::Type *i8p = Builder.getInt8PtrTy();

  llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
  llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
  cur->addIncoming(begin, originBB);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(cur, src, baseSizeInChars,
                       baseSizeAndAlign.second.getQuantity(),
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(cur, 1, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  unsigned DestAS =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
  llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP);

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);
  CharUnits Size = TypeInfo.first;
  CharUnits Align = TypeInfo.second;

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (Size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
          dyn_cast_or_null<VariableArrayType>(
            getContext().getAsArrayType(Ty))) {
      QualType eltType;
      llvm::Value *numElts;
      llvm::tie(numElts, eltType) = getVLASize(vlaType);

      SizeVal = numElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(Size);
    vla = 0;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    llvm::Value *SrcPtr =
      Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
                       Align.getQuantity(), false);
}

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (IndirectBranch == 0)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              llvm::Value *&addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size.  Note that
  // this is the size of the VLA in bytes, not its size in elements.
  llvm::Value *numVLAElements = 0;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;

    // Walk into all VLAs.  This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
    dyn_cast<llvm::ArrayType>(
      cast<llvm::PointerType>(addr->getType())->getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out-of-synch");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
        cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    unsigned AddressSpace = addr->getType()->getPointerAddressSpace();
    llvm::Type *BaseType = ConvertType(eltType)->getPointerTo(AddressSpace);
    addr = Builder.CreateBitCast(addr, BaseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = 0;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fcatch-undefined-behavior to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return std::pair<llvm::Value*,QualType>(numElements, elementType);
}

void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a value
          //   greater than zero.
          if (SanOpts->VLABound &&
              size->getType()->isSignedIntegerType()) {
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
              EmitCheckSourceLocation(size->getLocStart()),
              EmitCheckTypeDescriptor(size->getType())
            };
            EmitCheck(Builder.CreateICmpSGT(Size, Zero),
                      "vla_bound_not_positive", StaticArgs, Size,
                      CRK_Recoverable);
          }

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getResultType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;
    }
  } while (type->isVariablyModifiedType());
}

llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              llvm::Constant *Init) {
  assert(Init && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}

llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
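  // Emit one llvm.var.annotation call per 'annotate' attribute on the
  // variable.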
  for (specific_attr_iterator<AnnotateAttr>
         ai = D->specific_attr_begin<AnnotateAttr>(),
         ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai)
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       (*ai)->getAnnotation(), D->getLocation());
}

llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                                   llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Type *VTy = V->getType();
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (specific_attr_iterator<AnnotateAttr>
         ai = D->specific_attr_begin<AnnotateAttr>(),
         ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
    V = EmitAnnotationCall(F, V, (*ai)->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return V;
}

CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }