//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCleanup.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Disable lifetime markers in msan builds.
  // FIXME: Remove this when msan works with lifetime markers.
  if (LangOpts.Sanitize.has(SanitizerKind::Memory))
    return false;

  // Asan uses markers for use-after-scope checks.
  if (CGOpts.SanitizeAddressUseAfterScope)
    return true;

  // For now, only in optimized builds.
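  // At -O0 no pass consumes the markers, so they would only add IR noise;
  // in optimized builds they enable better stack slot reuse.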
  return CGOpts.OptimizationLevel != 0;
}

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      CurFn(nullptr), ReturnValue(Address::invalid()),
      CapturedStmtInfo(nullptr), SanOpts(CGM.getLangOpts().Sanitize),
      IsSanitizerScope(false), CurFuncIsThunk(false), AutoreleaseResult(false),
      SawAsmBlock(false), IsOutlinedSEHHelper(false), BlockInfo(nullptr),
      BlockPointer(nullptr), LambdaThisCaptureField(nullptr),
      NormalCleanupDest(nullptr), NextCleanupDestIndex(1),
      FirstBlockInfo(nullptr), EHResumeBlock(nullptr), ExceptionSlot(nullptr),
      EHSelectorSlot(nullptr), DebugInfo(CGM.getModuleDebugInfo()),
      DisableDebugInfo(false), DidCallStackSave(false), IndirectBranch(nullptr),
      PGO(cgm), SwitchInsn(nullptr), SwitchWeights(nullptr),
      CaseRangeBlock(nullptr), UnreachableBlock(nullptr), NumReturnExprs(0),
      NumSimpleReturnExprs(0), CXXABIThisDecl(nullptr),
      CXXABIThisValue(nullptr), CXXThisValue(nullptr),
      CXXStructorImplicitParamDecl(nullptr),
      CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr),
      CurLexicalScope(nullptr), TerminateLandingPad(nullptr),
      TerminateHandler(nullptr), TrapBB(nullptr),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  llvm::FastMathFlags FMF;
  if (CGM.getLangOpts().FastMath)
    FMF.setFast();
  if (CGM.getLangOpts().FiniteMathOnly) {
    FMF.setNoNaNs();
    FMF.setNoInfs();
  }
  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
    FMF.setNoNaNs();
  }
  if (CGM.getCodeGenOpts().NoSignedZeros) {
    FMF.setNoSignedZeros();
  }
  if (CGM.getCodeGenOpts().ReciprocalMath) {
    FMF.setAllowReciprocal();
  }
  Builder.setFastMathFlags(FMF);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  // If there are any unclaimed block infos, go ahead and destroy them
  // now. This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);
}

CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(
    QualType T, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) {
  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
                                 /* forPointeeType= */ true);
}

CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
                                                   LValueBaseInfo *BaseInfo,
                                                   TBAAAccessInfo *TBAAInfo,
                                                   bool forPointeeType) {
  if (TBAAInfo)
    *TBAAInfo = CGM.getTBAAAccessInfo(T);

  // Honor alignment typedef attributes even on incomplete types.
  // We also honor them straight for C++ class types, even as pointees;
  // there's an expressivity gap here.
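  // For example, given "typedef int AlignedInt __attribute__((aligned(16)));",
  // an AlignedInt lvalue gets 16-byte alignment even though the canonical
  // 'int' would only warrant 4.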
  if (auto TT = T->getAs<TypedefType>()) {
    if (auto Align = TT->getDecl()->getMaxAlignment()) {
      if (BaseInfo)
        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
      return getContext().toCharUnitsFromBits(Align);
    }
  }

  if (BaseInfo)
    *BaseInfo = LValueBaseInfo(AlignmentSource::Type);

  CharUnits Alignment;
  if (T->isIncompleteType()) {
    Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
  } else {
    // For C++ class pointees, we don't know whether we're pointing at a
    // base or a complete object, so we generally need to use the
    // non-virtual alignment.
    const CXXRecordDecl *RD;
    if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
      Alignment = CGM.getClassPointerAlignment(RD);
    } else {
      Alignment = getContext().getTypeAlignInChars(T);
      if (T.getQualifiers().hasUnaligned())
        Alignment = CharUnits::One();
    }

    // Cap to the global maximum type alignment unless the alignment
    // was somehow explicit on the type.
    if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
      if (Alignment.getQuantity() > MaxAlign &&
          !getContext().isAlignmentRequired(T))
        Alignment = CharUnits::fromQuantity(MaxAlign);
    }
  }
  return Alignment;
}

LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
                          TBAAInfo);
}

/// Given a value of type T* that may not be to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                            /* forPointeeType= */ true);
  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
}


llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
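    // These are all evaluated into a memory location rather than as a
    // single SSA value, i.e. TEK_Aggregate.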
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be
      // used later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the
  // block unless it has uses. However, we still need a place to put the
  // debug region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code.  If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters.  Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo())
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end
  // of the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should
    // be no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal; zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
           I = DeferredReplacements.begin(),
           E = DeferredReplacements.end();
       I != E; ++I) {
    I->first->replaceAllUsesWith(I->second);
    I->first->eraseFromParent();
  }

  // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in a slight increase in the number of
  // instructions if compiled with no optimizations. We do it for coroutines
  // because the lifetime of the CleanupDestSlot alloca makes correct
  // coroutine frame building very difficult.
  if (NormalCleanupDest && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(NormalCleanupDest, DT);
    NormalCleanupDest = nullptr;
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// ShouldXRayInstrument - Return true if the current function should be
/// instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents;
}

llvm::Constant *
CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
                                            llvm::Constant *Addr) {
  // Addresses stored in prologue data can't require run-time fixups and must
  // be PC-relative. Run-time fixups are undesirable because they necessitate
  // writable text segments, which are unsafe. And absolute addresses are
  // undesirable because they break PIE mode.

  // Add a layer of indirection through a private global. Taking its address
  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
                                      /*isConstant=*/true,
                                      llvm::GlobalValue::PrivateLinkage, Addr);

  // Create a PC-relative address.
  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
  return (IntPtrTy == Int32Ty)
             ? PCRelAsInt
             : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
}

llvm::Value *
CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
                                          llvm::Value *EncodedAddr) {
  // Reconstruct the address of the global.
  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");

  // Load the original pointer through the global.
  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
                            "decoded_addr");
}

static void removeImageAccessQualifier(std::string& TyName) {
  std::string ReadOnlyQual("__read_only");
  std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
  if (ReadOnlyPos != std::string::npos)
    // "+ 1" for the space after the access qualifier.
    TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
  else {
    std::string WriteOnlyQual("__write_only");
    std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
    if (WriteOnlyPos != std::string::npos)
      TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
    else {
      std::string ReadWriteQual("__read_write");
      std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
      if (ReadWritePos != std::string::npos)
        TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
    }
  }
}

// Returns the address space id that should be produced in the
// kernel_arg_addr_space metadata. This is always fixed to the ids
// specified in the SPIR 2.0 specification, so that, for example, the
// clGetKernelArgInfo() implementation can distinguish the address spaces
// even on targets without a unique mapping to the OpenCL address spaces
// (basically all single-AS CPUs).
static unsigned ArgInfoAddressSpace(LangAS AS) {
  switch (AS) {
  case LangAS::opencl_global:   return 1;
  case LangAS::opencl_constant: return 2;
  case LangAS::opencl_local:    return 3;
  case LangAS::opencl_generic:  return 4; // Not in SPIR 2.0 specs.
  default:
    return 0; // Assume private.
  }
}

// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
// information in the program executable. The stored information includes
// each argument's name, its type, and the address and access qualifiers used.
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
                                 CodeGenModule &CGM, llvm::LLVMContext &Context,
                                 CGBuilderTy &Builder, ASTContext &ASTCtx) {
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list of "key" plus N values, one value per kernel
  // argument.

  const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy();

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Metadata *, 8> addressQuals;

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Metadata *, 8> accessQuals;

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Metadata *, 8> argTypeNames;

  // MDNode for the kernel argument base type names.
  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Metadata *, 8> argTypeQuals;

  // MDNode for the kernel argument names.
  SmallVector<llvm::Metadata *, 8> argNames;

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
    const ParmVarDecl *parm = FD->getParamDecl(i);
    QualType ty = parm->getType();
    std::string typeQuals;

    if (ty->isPointerType()) {
      QualType pointeeTy = ty->getPointeeType();

      // Get address qualifier.
      addressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(
          ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));

      // Get argument type name.
      std::string typeName =
          pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";

      // Turn "unsigned type" into "utype".
      std::string::size_type pos = typeName.find("unsigned");
      if (pointeeTy.isCanonical() && pos != std::string::npos)
        typeName.erase(pos + 1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      std::string baseTypeName =
          pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
              Policy) +
          "*";

      // Turn "unsigned type" into "utype".
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos + 1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      // Get argument type qualifiers:
      if (ty.isRestrictQualified())
        typeQuals = "restrict";
      if (pointeeTy.isConstQualified() ||
          (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
        typeQuals += typeQuals.empty() ? "const" : " const";
      if (pointeeTy.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    } else {
      uint32_t AddrSpc = 0;
      bool isPipe = ty->isPipeType();
      if (ty->isImageType() || isPipe)
        AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);

      addressQuals.push_back(
          llvm::ConstantAsMetadata::get(Builder.getInt32(AddrSpc)));

      // Get argument type name.
      std::string typeName;
      if (isPipe)
        typeName = ty.getCanonicalType()->getAs<PipeType>()->getElementType()
                       .getAsString(Policy);
      else
        typeName = ty.getUnqualifiedType().getAsString(Policy);

      // Turn "unsigned type" into "utype".
      std::string::size_type pos = typeName.find("unsigned");
      if (ty.isCanonical() && pos != std::string::npos)
        typeName.erase(pos + 1, 8);

      std::string baseTypeName;
      if (isPipe)
        baseTypeName = ty.getCanonicalType()->getAs<PipeType>()
                           ->getElementType().getCanonicalType()
                           .getAsString(Policy);
      else
        baseTypeName =
            ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);

      // Remove access qualifiers on images (they are inseparable from the
      // type in clang's implementation, but the OpenCL spec provides a
      // special query to get the access qualifier via clGetKernelArgInfo
      // with CL_KERNEL_ARG_ACCESS_QUALIFIER):
      if (ty->isImageType()) {
        removeImageAccessQualifier(typeName);
        removeImageAccessQualifier(baseTypeName);
      }

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      // Turn "unsigned type" into "utype".
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos + 1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      if (isPipe)
        typeQuals = "pipe";
    }

    argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));

    // Get image and pipe access qualifier:
    if (ty->isImageType() || ty->isPipeType()) {
      const Decl *PDecl = parm;
      if (auto *TD = dyn_cast<TypedefType>(ty))
        PDecl = TD->getDecl();
      const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
      if (A && A->isWriteOnly())
        accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
      else if (A && A->isReadWrite())
        accessQuals.push_back(llvm::MDString::get(Context, "read_write"));
      else
        accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
    } else
      accessQuals.push_back(llvm::MDString::get(Context, "none"));

    // Get argument name.
    argNames.push_back(llvm::MDString::get(Context, parm->getName()));
  }

  Fn->setMetadata("kernel_arg_addr_space",
                  llvm::MDNode::get(Context, addressQuals));
  Fn->setMetadata("kernel_arg_access_qual",
                  llvm::MDNode::get(Context, accessQuals));
  Fn->setMetadata("kernel_arg_type",
                  llvm::MDNode::get(Context, argTypeNames));
  Fn->setMetadata("kernel_arg_base_type",
                  llvm::MDNode::get(Context, argBaseTypeNames));
  Fn->setMetadata("kernel_arg_type_qual",
                  llvm::MDNode::get(Context, argTypeQuals));
  if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    Fn->setMetadata("kernel_arg_name",
                    llvm::MDNode::get(Context, argNames));
}

void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn) {
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  GenOpenCLArgMetadata(FD, Fn, CGM, Context, Builder, getContext());

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

static void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
  Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
}

static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD,
                                    QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
    if (FD->usesSEHTry())
      CurSEHParent = FD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function has been blacklisted for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc))                \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (0);

  if (D) {
    // Apply the no_sanitize* attributes to SanOpts.
    for (auto Attr : D->specific_attrs<NoSanitizeAttr>())
      SanOpts.Mask &= ~Attr->getMask();
  }

  // Apply sanitizer attributes to the function.
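  // The LLVM sanitizer passes only instrument functions carrying the
  // corresponding attribute, so this is where SanOpts actually takes effect.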
  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
  if (SanOpts.hasOneOf(SanitizerKind::HWAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
  if (SanOpts.has(SanitizerKind::Thread))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);
  if (SanOpts.has(SanitizerKind::Memory))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run
  // time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    } else if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      IdentifierInfo *II = FD->getIdentifier();
      if (II && II->isStr("__destroy_helper_block_"))
        markAsIgnoreThreadCheckingAtRuntime(Fn);
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on
  // the namespace because not all allocators are in std::.
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Apply xray attributes to the function (as a string, for now).
  if (D && ShouldXRayInstrumentFunction()) {
    if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
      if (XRayAttr->alwaysXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-always");
      if (XRayAttr->neverXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-never");
      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>()) {
        Fn->addFnAttr("xray-log-args",
                      llvm::utostr(LogArgs->getArgumentCount()));
      }
    } else {
      if (!CGM.imbueXRayAttrs(Fn, Loc))
        Fn->addFnAttr(
            "xray-instruction-threshold",
            llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
    }
  }

  // Add no-jump-tables value.
  Fn->addFnAttr("no-jump-tables",
                llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
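  // An indirect call site compiled with -fsanitize=function reads this
  // prologue data: it checks the signature and compares the encoded
  // function-type RTTI against the static type of the call expression,
  // catching calls through a function pointer of the wrong type.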
  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
        llvm::Constant *FTRTTIConst =
            CGM.GetAddrOfRTTIDescriptor(FD->getType(), /*ForEH=*/true);
        llvm::Constant *FTRTTIConstEncoded =
            EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
        llvm::Constant *PrologueStructElems[] = {PrologueSig,
                                                 FTRTTIConstEncoded};
        llvm::Constant *PrologueStructConst =
            llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
        Fn->setPrologueData(PrologueStructConst);
      }
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability(getContext());
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  if (getLangOpts().CPlusPlus)
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (FD->isMain())
        Fn->addFnAttr(llvm::Attribute::NoRecurse);

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the
    // calling convention.
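    // For example, an instance method 'void S::f(int)' is described to the
    // debugger as a function of type 'void (S *, int)'.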
    CallingConv CC = CallingConv::CC_C;
    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
        CC = SrcFnTy->getCallConv();
    SmallVector<QualType, 16> ArgTypes;
    for (const VarDecl *VD : Args)
      ArgTypes.push_back(VD->getType());
    QualType FnType = getContext().getFunctionType(
        RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here would impact optimizations such as
  // function inlining, we just add an attribute telling the backend to
  // insert the mcount call. The attribute value is the mcount function
  // name, which is architecture dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    if (CGM.getCodeGenOpts().CallFEntry)
      Fn->addFnAttr("fentry-call", "true");
    else {
      if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
    }
  }

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
    Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
    ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
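    // Under ARC, results of retainable object type are ordinarily returned
    // at +0, so unless the convention says the result is returned retained,
    // the epilogue hands the value back through an autorelease.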
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either
        // by value or by reference, make sure CXXThisValue points to the
        // correct object.

        // Get the lvalue for the field (which is a copy of the enclosing
        // object or contains the address of the enclosing object).
        LValue ThisFieldLValue =
            EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address.
          CXXThisValue = ThisFieldLValue.getAddress().getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was
          // captured by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation())
                  .getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'?  The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }

    // Check the 'this' pointer once per function, if it's available.
    if (CXXABIThisValue) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
      QualType ThisTy = MD->getThisType(getContext());

      // If this is the call operator of a lambda with no capture-default, it
      // may have a static invoker function, which may call this operator with
      // a null 'this' pointer.
      if (isLambdaCallOperator(MD) &&
          cast<CXXRecordDecl>(MD->getParent())->getLambdaCaptureDefault() ==
              LCD_None)
        SkippedChecks.set(SanitizerKind::Null, true);

      EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
                                                : TCK_MemberCall,
                    Loc, CXXABIThisValue, ThisTy,
                    getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
                    SkippedChecks);
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
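    // For example, for 'void f(int n, int m[n][n])' the parameter decays to
    // 'int (*)[n]', but the type as written still carries the size
    // expression 'n', which must be evaluated here.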
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
                                       const Stmt *Body) {
  incrementProfileCounter(Body);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it.  We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->isInterposable()) return;

  for (llvm::BasicBlock &BB : *F)
    for (llvm::Instruction &I : BB)
      if (I.mayThrow())
        return;

  F->setDoesNotThrow();
}

QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
                                               FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  QualType ResTy = FD->getReturnType();

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isInstance()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType(getContext());
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  // The base version of an inheriting constructor whose constructed base is a
  // virtual base is not passed any arguments (because it doesn't actually
  // call the inherited constructor).
  bool PassedParams = true;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    if (auto Inherited = CD->getInheritedConstructor())
      PassedParams =
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());

  if (PassedParams) {
    for (auto *Param : FD->parameters()) {
      Args.push_back(Param);
      if (!Param->hasAttr<PassObjectSizeAttr>())
        continue;

      auto *Implicit = ImplicitParamDecl::Create(
          getContext(), Param->getDeclContext(), Param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
      SizeArguments[Param] = Implicit;
      Args.push_back(Implicit);
    }
  }

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  return ResTy;
}

static bool
shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
                                             const ASTContext &Context) {
  QualType T = FD->getReturnType();
  // Avoid the optimization for functions that return a record type with a
  // trivial destructor or another trivially copyable type.
  if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      return !ClassDecl->hasTrivialDestructor();
  }
  return !T.isTriviallyCopyableType(Context);
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  CurGD = GD;

  FunctionArgList Args;
  QualType ResTy = BuildFunctionArgList(GD, Args);

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  // The function might not have a body if we're generating thunks for a
  // function declaration.
  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();
  else
    BodyRange = FD->getLocation();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  Stmt *Body = FD->getBody();

  // Initialize helper which will detect jumps which can cause invalid
  // lifetime markers.
  if (Body && ShouldEmitLifetimeMarkers)
    Bypasses.Init(Body);

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Generate the body of the function.
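  // Assign PGO region counters before emitting any statements so that the
  // counter increments emitted below line up with the AST regions the
  // profile mapping expects.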
  PGO.assignRegionCounters(GD, CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !getLangOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
  else if (isa<CXXMethodDecl>(FD) &&
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else if (Body) {
    EmitFunctionBody(Args, Body);
  } else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior
  //   in a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    bool ShouldEmitUnreachable =
        CGM.getCodeGenOpts().StrictReturn ||
        shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
    if (SanOpts.has(SanitizerKind::Return)) {
      SanitizerScope SanScope(this);
      llvm::Value *IsFalse = Builder.getFalse();
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
                SanitizerHandler::MissingReturn,
                EmitCheckSourceLocation(FD->getLocation()), None);
    } else if (ShouldEmitUnreachable) {
      if (CGM.getCodeGenOpts().OptimizationLevel == 0)
        EmitTrapCall(llvm::Intrinsic::trap);
    }
    if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
      Builder.CreateUnreachable();
      Builder.ClearInsertionPoint();
    }
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, then the absence of a label
/// means we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) {  ...  foo:  bar();  }  goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we
  // have to emit the code.
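  // For example, in "switch (n) { if (0) { case 1: foo(); } }" the case is
  // still reachable through the switch, so the 'if (0)' body cannot be
  // dropped.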
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (const Stmt *SubStmt : S->children())
    if (ContainsLabel(SubStmt, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (const Stmt *SubStmt : S->children())
    if (containsBreak(SubStmt))
      return true;

  return false;
}

bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
  if (!S) return false;

  // Some statement kinds add a scope and thus never add a decl to the current
  // scope. Note, this list is longer than the list of statements that might
  // have an unscoped decl nested within them, but this way is conservatively
  // correct even if more statement kinds are added.
  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
      isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
      isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
      isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
    return false;

  if (isa<DeclStmt>(S))
    return true;

  for (const Stmt *SubStmt : S->children())
    if (mightAddDeclToScope(SubStmt))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the boolean result in ResultBool.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool,
                                                   bool AllowLabels) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the folded value in ResultInt.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   llvm::APSInt &ResultInt,
                                                   bool AllowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  llvm::APSInt Int;
  if (!Cond->EvaluateAsInt(Int, getContext()))
    return false;  // Not foldable, not integer or not fully evaluatable.

  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Int;
  return true;
}


/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
/// if statement) to the specified blocks.
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock,
                                           uint64_t TrueCount) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {

    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      // The counter tells us how often we evaluate RHS, and all of TrueCount
      // can be propagated to that branch.
      uint64_t RHSCount = getProfileCount(CondBOp->getRHS());

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
        EmitBlock(LHSTrue);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      // We have the count for entry to the RHS and for the whole expression
      // being true, so we can divvy up the True count between the short
      // circuit and the RHS.
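      // Illustration with assumed (hypothetical) profile numbers: if the
      // '||' is reached 10 times, the RHS is entered 3 times, and the whole
      // expression is true 8 times, then LHSCount = 10 - 3 = 7 (the LHS
      // short-circuited to true) and RHSCount = 8 - 7 = 1.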
      uint64_t LHSCount =
          getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
      uint64_t RHSCount = TrueCount - LHSCount;

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
        EmitBlock(LHSFalse);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);

      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot) {
      // Negate the count.
      uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
      // Negate the condition and swap the destination blocks.
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
                                  FalseCount);
    }
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
                         getProfileCount(CondOp));

    // When computing PGO branch weights, we only know the overall count for
    // the true block. This code is essentially doing tail duplication of the
    // naive code-gen, introducing new edges for which counts are not
    // available. Divide the counts proportionally between the LHS and RHS of
    // the conditional operator.
    uint64_t LHSScaledTrueCount = 0;
    if (TrueCount) {
      double LHSRatio =
          getProfileCount(CondOp) / (double)getCurrentProfileCount();
      LHSScaledTrueCount = TrueCount * LHSRatio;
    }

    cond.begin(*this);
    EmitBlock(LHSBlock);
    incrementProfileCounter(CondOp);
    {
      ApplyDebugLocation DL(*this, Cond);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
                           LHSScaledTrueCount);
    }
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
                         TrueCount - LHSScaledTrueCount);
    cond.end(*this);

    return;
  }

  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }

  // If the branch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the branch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
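  // For example (hypothetical source):
  //   if (__builtin_unpredictable(x == 0)) { ... }
  // lowers to a conditional branch carrying '!unpredictable' metadata, which
  // tells later passes not to bet on either edge.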
  llvm::MDNode *Unpredictable = nullptr;
  auto *Call = dyn_cast<CallExpr>(Cond);
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
      llvm::MDBuilder MDHelper(getLLVMContext());
      Unpredictable = MDHelper.createUnpredictable();
    }
  }

  // Create branch weights based on the number of times we get here and the
  // number of times the condition should be true.
  uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
  llvm::MDNode *Weights =
      createProfileWeights(TrueCount, CurrentCount - TrueCount);

  // Emit the code with the fully general case.
  llvm::Value *CondV;
  {
    ApplyDebugLocation DL(*this, Cond);
    CondV = EvaluateExprAsBool(Cond);
  }
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
  CGM.ErrorUnsupported(S, Type);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType - the inner-most element type of the array
/// \param src - a char* pointing to the bit-pattern for a single
///   base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               Address dest, Address src,
                               llvm::Value *sizeInChars) {
  CGBuilderTy &Builder = CGF.Builder;

  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());

  Address begin =
    Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
  llvm::Value *end =
    Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
  cur->addIncoming(begin.getPointer(), originBB);

  CharUnits curAlign =
    dest.getAlignment().alignmentOfArrayElement(baseSize);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

void
CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
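  // (For example, zero-initializing a hypothetical 'struct Empty {};' emits
  // no stores at all: the class has no data members to initialize.)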
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (DestPtr.getElementType() != Int8Ty)
    DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);

  // Get size and alignment info for this aggregate.
  CharUnits size = getContext().getTypeSizeInChars(Ty);

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
            dyn_cast_or_null<VariableArrayType>(
                getContext().getAsArrayType(Ty))) {
      QualType eltType;
      llvm::Value *numElts;
      std::tie(numElts, eltType) = getVLASize(vlaType);

      SizeVal = numElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(size);
    vla = nullptr;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    CharUnits NullAlign = DestPtr.getAlignment();
    NullVariable->setAlignment(NullAlign.getQuantity());
    Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
                   NullAlign);

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
}

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (!IndirectBranch)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its
  // block.
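  // (Every 'goto *ptr' in the function funnels through this one shared
  // block; the classic example is the computed-goto dispatch loop of a
  // hypothetical bytecode interpreter.)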
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              Address &addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size.  Note that this is the
  // number of elements in the VLA (folded across all VLA dimensions), not a
  // size in bytes.
  llvm::Value *numVLAElements = nullptr;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;

    // Walk into all VLAs.  This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
    dyn_cast<llvm::ArrayType>(addr.getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out of sync");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    llvm::Type *baseType = ConvertType(eltType);
    addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
  } else {
    // Create the actual GEP.
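    // For a hypothetical 'int a[2][3]', gepIndices is {0, 0, 0}, so this
    // emits
    //   getelementptr inbounds [2 x [3 x i32]], [2 x [3 x i32]]* %a,
    //                 i32 0, i32 0, i32 0
    // and countFromCLAs is 2 * 3 = 6.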
    addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
                                             gepIndices, "array.begin"),
                   addr.getAlignment());
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = nullptr;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return std::pair<llvm::Value*, QualType>(numElements, elementType);
}

void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
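    // (For instance, a builtin type like 'int' or an enum can never embed a
    // VLA size expression, so reaching one of these cases indicates a bug in
    // the caller.)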
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCTypeParam:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a
          //   value greater than zero.
          if (SanOpts.has(SanitizerKind::VLABound) &&
              size->getType()->isSignedIntegerType()) {
            SanitizerScope SanScope(this);
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
              EmitCheckSourceLocation(size->getLocStart()),
              EmitCheckTypeDescriptor(size->getType())
            };
            EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
                                     SanitizerKind::VLABound),
                      SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
          }

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
    case Type::PackExpansion:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;

    case Type::Pipe:
      type = cast<PipeType>(ty)->getElementType();
      break;
    }
  } while (type->isVariablyModifiedType());
}

Address CodeGenFunction::EmitVAListRef(const Expr *E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitPointerWithAlignment(E);
  return EmitLValue(E).getAddress();
}

Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
  return EmitLValue(E).getAddress();
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              const APValue &Init) {
  assert(!Init.isUninit() && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}

llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME: We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       I->getAnnotation(), D->getLocation());
}

Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                              Address Addr) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Value *V = Addr.getPointer();
  llvm::Type *VTy = V->getType();
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME: Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return Address(V, Addr.getAlignment());
}

CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }

CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
    : CGF(CGF) {
  assert(!CGF->IsSanitizerScope);
  CGF->IsSanitizerScope = true;
}

CodeGenFunction::SanitizerScope::~SanitizerScope() {
  CGF->IsSanitizerScope = false;
}

void CodeGenFunction::InsertHelper(llvm::Instruction *I,
                                   const llvm::Twine &Name,
                                   llvm::BasicBlock *BB,
                                   llvm::BasicBlock::iterator InsertPt) const {
  LoopStack.InsertHelper(I);
  if (IsSanitizerScope)
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
}

void CGBuilderInserter::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const {
  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
  if (CGF)
    CGF->InsertHelper(I, Name, BB, InsertPt);
}

static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
                                CodeGenModule &CGM, const FunctionDecl *FD,
                                std::string &FirstMissing) {
  // If there aren't any required features listed then return false.
  if (ReqFeatures.empty())
    return false;

  // Now build up the set of caller features and verify that all the required
  // features are there.
  llvm::StringMap<bool> CallerFeatureMap;
  CGM.getFunctionFeatureMap(CallerFeatureMap, FD);

  // Every listed feature must be present in the caller.  Within a single
  // entry, '|' separates alternatives, any one of which satisfies that
  // entry.
  return std::all_of(
      ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
        SmallVector<StringRef, 1> OrFeatures;
        Feature.split(OrFeatures, "|");
        return std::any_of(OrFeatures.begin(), OrFeatures.end(),
                           [&](StringRef Feature) {
                             if (!CallerFeatureMap.lookup(Feature)) {
                               FirstMissing = Feature.str();
                               return false;
                             }
                             return true;
                           });
      });
}

// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
                                          const FunctionDecl *TargetDecl) {
  // Early exit if this is an indirect call.
  if (!TargetDecl)
    return;

  // Get the current enclosing function if it exists.  If it doesn't
  // we can't check the target features anyhow.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl);
  if (!FD)
    return;

  // Grab the required features for the call.  For a builtin this is listed
  // in the .td file with the default CPU; for an always_inline function this
  // is any listed CPU and any listed features.
  unsigned BuiltinID = TargetDecl->getBuiltinID();
  std::string MissingFeature;
  if (BuiltinID) {
    SmallVector<StringRef, 1> ReqFeatures;
    const char *FeatureList =
        CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
    // Return if the builtin doesn't have any required features.
    if (!FeatureList || StringRef(FeatureList) == "")
      return;
    StringRef(FeatureList).split(ReqFeatures, ",");
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(E->getLocStart(), diag::err_builtin_needs_feature)
          << TargetDecl->getDeclName()
          << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);

  } else if (TargetDecl->hasAttr<TargetAttr>()) {
    // Get the required features for the callee.
    SmallVector<StringRef, 1> ReqFeatures;
    llvm::StringMap<bool> CalleeFeatureMap;
    CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
    for (const auto &F : CalleeFeatureMap) {
      // Only positive features are "required".
      if (F.getValue())
        ReqFeatures.push_back(F.getKey());
    }
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(E->getLocStart(), diag::err_function_needs_feature)
          << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
  }
}

void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
  if (!CGM.getCodeGenOpts().SanitizeStats)
    return;

  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
  CGM.getSanStats().create(IRB, SSK);
}

llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
  if (CGDebugInfo *DI = getDebugInfo())
    return DI->SourceLocToDebugLoc(Location);

  return llvm::DebugLoc();
}