//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCleanup.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Disable lifetime markers in msan builds.
  // FIXME: Remove this when msan works with lifetime markers.
  if (LangOpts.Sanitize.has(SanitizerKind::Memory))
    return false;

  // Asan uses markers for use-after-scope checks.
  if (CGOpts.SanitizeAddressUseAfterScope)
    return true;

  // For now, only in optimized builds.
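  // (Lifetime markers let the optimizer reuse stack slots for allocas with
  // disjoint lifetimes; at -O0 nothing consumes them, so they are just noise.)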
  return CGOpts.OptimizationLevel != 0;
}

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      CurFn(nullptr), ReturnValue(Address::invalid()),
      CapturedStmtInfo(nullptr), SanOpts(CGM.getLangOpts().Sanitize),
      IsSanitizerScope(false), CurFuncIsThunk(false), AutoreleaseResult(false),
      SawAsmBlock(false), IsOutlinedSEHHelper(false), BlockInfo(nullptr),
      BlockPointer(nullptr), LambdaThisCaptureField(nullptr),
      NormalCleanupDest(Address::invalid()), NextCleanupDestIndex(1),
      FirstBlockInfo(nullptr), EHResumeBlock(nullptr), ExceptionSlot(nullptr),
      EHSelectorSlot(nullptr), DebugInfo(CGM.getModuleDebugInfo()),
      DisableDebugInfo(false), DidCallStackSave(false), IndirectBranch(nullptr),
      PGO(cgm), SwitchInsn(nullptr), SwitchWeights(nullptr),
      CaseRangeBlock(nullptr), UnreachableBlock(nullptr), NumReturnExprs(0),
      NumSimpleReturnExprs(0), CXXABIThisDecl(nullptr),
      CXXABIThisValue(nullptr), CXXThisValue(nullptr),
      CXXStructorImplicitParamDecl(nullptr),
      CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr),
      CurLexicalScope(nullptr), TerminateLandingPad(nullptr),
      TerminateHandler(nullptr), TrapBB(nullptr),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  llvm::FastMathFlags FMF;
  if (CGM.getLangOpts().FastMath)
    FMF.setFast();
  if (CGM.getLangOpts().FiniteMathOnly) {
    FMF.setNoNaNs();
    FMF.setNoInfs();
  }
  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
    FMF.setNoNaNs();
  }
  if (CGM.getCodeGenOpts().NoSignedZeros) {
    FMF.setNoSignedZeros();
  }
  if (CGM.getCodeGenOpts().ReciprocalMath) {
    FMF.setAllowReciprocal();
  }
  if (CGM.getCodeGenOpts().Reassociate) {
    FMF.setAllowReassoc();
  }
  Builder.setFastMathFlags(FMF);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  // If there are any unclaimed block infos, go ahead and destroy them
  // now.  This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);
}

CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(
    QualType T, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) {
  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
                                 /* forPointeeType= */ true);
}

CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
                                                   LValueBaseInfo *BaseInfo,
                                                   TBAAAccessInfo *TBAAInfo,
                                                   bool forPointeeType) {
  if (TBAAInfo)
    *TBAAInfo = CGM.getTBAAAccessInfo(T);

  // Honor alignment typedef attributes even on incomplete types.
  // We also honor them straight for C++ class types, even as pointees;
  // there's an expressivity gap here.
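  // (For example, given 'typedef int AlignedInt __attribute__((aligned(8)));',
  // the typedef's explicit alignment of 8 wins over int's natural alignment.)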
  if (auto TT = T->getAs<TypedefType>()) {
    if (auto Align = TT->getDecl()->getMaxAlignment()) {
      if (BaseInfo)
        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
      return getContext().toCharUnitsFromBits(Align);
    }
  }

  if (BaseInfo)
    *BaseInfo = LValueBaseInfo(AlignmentSource::Type);

  CharUnits Alignment;
  if (T->isIncompleteType()) {
    Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
  } else {
    // For C++ class pointees, we don't know whether we're pointing at a
    // base or a complete object, so we generally need to use the
    // non-virtual alignment.
    const CXXRecordDecl *RD;
    if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
      Alignment = CGM.getClassPointerAlignment(RD);
    } else {
      Alignment = getContext().getTypeAlignInChars(T);
      if (T.getQualifiers().hasUnaligned())
        Alignment = CharUnits::One();
    }

    // Cap to the global maximum type alignment unless the alignment
    // was somehow explicit on the type.
    if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
      if (Alignment.getQuantity() > MaxAlign &&
          !getContext().isAlignmentRequired(T))
        Alignment = CharUnits::fromQuantity(MaxAlign);
    }
  }
  return Alignment;
}

LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
                          TBAAInfo);
}

/// Given a value of type T* that may not point to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                            /* forPointeeType= */ true);
  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
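    // (These are evaluated into memory rather than as SSA values.)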
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be
      // used later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
                               && NumSimpleReturnExprs == NumReturnExprs
                               && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code.  If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters.  Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo())
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end
  // of the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
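  // (This happens for code like 'void *p = &&lbl;' with no 'goto *expr;'
  // anywhere in the function.)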
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
           I = DeferredReplacements.begin(),
           E = DeferredReplacements.end();
       I != E; ++I) {
    I->first->replaceAllUsesWith(I->second);
    I->first->eraseFromParent();
  }

  // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in a slight increase in the number of
  // instructions if compiled with no optimizations. We do it for coroutines
  // because the lifetime of the CleanupDestSlot alloca makes correct
  // coroutine frame building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// ShouldXRayInstrument - Return true if the current function should be
/// instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

llvm::Constant *
CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
                                            llvm::Constant *Addr) {
  // Addresses stored in prologue data can't require run-time fixups and must
  // be PC-relative. Run-time fixups are undesirable because they necessitate
  // writable text segments, which are unsafe. And absolute addresses are
  // undesirable because they break PIE mode.

  // Add a layer of indirection through a private global. Taking its address
  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
                                      /*isConstant=*/true,
                                      llvm::GlobalValue::PrivateLinkage, Addr);

  // Create a PC-relative address.
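  // Encoded = &GV - &F, i.e. the offset of the indirection cell relative to
  // the function itself, truncated to 32 bits when pointers are wider.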
  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
  return (IntPtrTy == Int32Ty)
             ? PCRelAsInt
             : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
}

llvm::Value *
CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
                                          llvm::Value *EncodedAddr) {
  // Reconstruct the address of the global.
  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
  auto *GOTAddr =
      Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");

  // Load the original pointer through the global.
  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
                            "decoded_addr");
}

// Strips the access qualifier off an OpenCL image type name produced by the
// pretty-printer, e.g. "__read_only image2d_t" -> "image2d_t".
static void removeImageAccessQualifier(std::string& TyName) {
  std::string ReadOnlyQual("__read_only");
  std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
  if (ReadOnlyPos != std::string::npos)
    // "+ 1" for the space after access qualifier.
    TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
  else {
    std::string WriteOnlyQual("__write_only");
    std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
    if (WriteOnlyPos != std::string::npos)
      TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
    else {
      std::string ReadWriteQual("__read_write");
      std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
      if (ReadWritePos != std::string::npos)
        TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
    }
  }
}

// Returns the address space id that should be emitted in the
// kernel_arg_addr_space metadata. These are always fixed to the ids
// specified in the SPIR 2.0 specification so that, for example, a
// clGetKernelArgInfo() implementation can distinguish the address spaces
// even on targets without a unique mapping to the OpenCL address spaces
// (basically all single-AS CPUs).
static unsigned ArgInfoAddressSpace(LangAS AS) {
  switch (AS) {
  case LangAS::opencl_global:   return 1;
  case LangAS::opencl_constant: return 2;
  case LangAS::opencl_local:    return 3;
  case LangAS::opencl_generic:  return 4; // Not in SPIR 2.0 specs.
  default:
    return 0; // Assume private.
  }
}

// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
// information in the program executable. The argument information stored
// includes the argument name, its type, the address and access qualifiers
// used.
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
                                 CodeGenModule &CGM, llvm::LLVMContext &Context,
                                 CGBuilderTy &Builder, ASTContext &ASTCtx) {
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list in the form of "key", followed by N values,
  // where N is the number of kernel arguments.

  const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy();

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Metadata *, 8> addressQuals;

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Metadata *, 8> accessQuals;

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Metadata *, 8> argTypeNames;

  // MDNode for the kernel argument base type names.
  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Metadata *, 8> argTypeQuals;

  // MDNode for the kernel argument names.
  SmallVector<llvm::Metadata *, 8> argNames;

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
    const ParmVarDecl *parm = FD->getParamDecl(i);
    QualType ty = parm->getType();
    std::string typeQuals;

    if (ty->isPointerType()) {
      QualType pointeeTy = ty->getPointeeType();

      // Get address qualifier.
      addressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(
          ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));

      // Get argument type name.
      std::string typeName =
          pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";

      // Turn "unsigned type" to "utype", e.g. "unsigned int" -> "uint".
      std::string::size_type pos = typeName.find("unsigned");
      if (pointeeTy.isCanonical() && pos != std::string::npos)
        typeName.erase(pos + 1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      std::string baseTypeName =
          pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
              Policy) +
          "*";

      // Turn "unsigned type" to "utype", e.g. "unsigned int" -> "uint".
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos + 1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      // Get argument type qualifiers:
      if (ty.isRestrictQualified())
        typeQuals = "restrict";
      if (pointeeTy.isConstQualified() ||
          (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
        typeQuals += typeQuals.empty() ? "const" : " const";
      if (pointeeTy.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    } else {
      uint32_t AddrSpc = 0;
      bool isPipe = ty->isPipeType();
      if (ty->isImageType() || isPipe)
        AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);

      addressQuals.push_back(
          llvm::ConstantAsMetadata::get(Builder.getInt32(AddrSpc)));

      // Get argument type name.
      std::string typeName;
      if (isPipe)
        typeName = ty.getCanonicalType()->getAs<PipeType>()->getElementType()
                       .getAsString(Policy);
      else
        typeName = ty.getUnqualifiedType().getAsString(Policy);

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (ty.isCanonical() && pos != std::string::npos)
        typeName.erase(pos + 1, 8);

      std::string baseTypeName;
      if (isPipe)
        baseTypeName = ty.getCanonicalType()->getAs<PipeType>()
                           ->getElementType().getCanonicalType()
                           .getAsString(Policy);
      else
        baseTypeName =
            ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);

      // Remove access qualifiers on images
      // (as they are inseparable from type in clang implementation,
      // but OpenCL spec provides a special query to get access qualifier
      // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
      if (ty->isImageType()) {
        removeImageAccessQualifier(typeName);
        removeImageAccessQualifier(baseTypeName);
      }

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      // Turn "unsigned type" to "utype"
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos + 1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      if (isPipe)
        typeQuals = "pipe";
    }

    argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));

    // Get image and pipe access qualifier:
    if (ty->isImageType() || ty->isPipeType()) {
      const Decl *PDecl = parm;
      if (auto *TD = dyn_cast<TypedefType>(ty))
        PDecl = TD->getDecl();
      const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
      if (A && A->isWriteOnly())
        accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
      else if (A && A->isReadWrite())
        accessQuals.push_back(llvm::MDString::get(Context, "read_write"));
      else
        accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
    } else
      accessQuals.push_back(llvm::MDString::get(Context, "none"));

    // Get argument name.
    argNames.push_back(llvm::MDString::get(Context, parm->getName()));
  }

  Fn->setMetadata("kernel_arg_addr_space",
                  llvm::MDNode::get(Context, addressQuals));
  Fn->setMetadata("kernel_arg_access_qual",
                  llvm::MDNode::get(Context, accessQuals));
  Fn->setMetadata("kernel_arg_type",
                  llvm::MDNode::get(Context, argTypeNames));
  Fn->setMetadata("kernel_arg_base_type",
                  llvm::MDNode::get(Context, argBaseTypeNames));
  Fn->setMetadata("kernel_arg_type_qual",
                  llvm::MDNode::get(Context, argTypeQuals));
  if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    Fn->setMetadata("kernel_arg_name",
                    llvm::MDNode::get(Context, argNames));
}

void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn)
{
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  GenOpenCLArgMetadata(FD, Fn, CGM, Context, Builder, getContext());

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

/// Determine whether the function F ends with a return stmt.
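/// (Used when counting implicit returns: if the body already ends in an
/// explicit return, there is no fall-off-the-end return to count.)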
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

static void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
  Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
}

static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD,
                                    QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
    if (FD->usesSEHTry())
      CurSEHParent = FD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function has been blacklisted for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc))                \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (0);

  if (D) {
    // Apply the no_sanitize* attributes to SanOpts.
    for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
      SanitizerMask mask = Attr->getMask();
      SanOpts.Mask &= ~mask;
      if (mask & SanitizerKind::Address)
        SanOpts.set(SanitizerKind::KernelAddress, false);
      if (mask & SanitizerKind::KernelAddress)
        SanOpts.set(SanitizerKind::Address, false);
      if (mask & SanitizerKind::HWAddress)
        SanOpts.set(SanitizerKind::KernelHWAddress, false);
      if (mask & SanitizerKind::KernelHWAddress)
        SanOpts.set(SanitizerKind::HWAddress, false);
    }
  }

  // Apply sanitizer attributes to the function.
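  // (These IR-level attributes are what the corresponding LLVM sanitizer
  // instrumentation passes key off of.)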
  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
  if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
                       SanitizerKind::KernelHWAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
  if (SanOpts.has(SanitizerKind::Thread))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);
  if (SanOpts.has(SanitizerKind::Memory))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run
  // time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    } else if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      IdentifierInfo *II = FD->getIdentifier();
      if (II && II->isStr("__destroy_helper_block_"))
        markAsIgnoreThreadCheckingAtRuntime(Fn);
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on
  // the namespace because not all allocators are in std::.
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Apply xray attributes to the function (as a string, for now).
  bool InstrumentXray = ShouldXRayInstrumentFunction() &&
                        CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
                            XRayInstrKind::Function);
  if (D && InstrumentXray) {
    if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
      if (XRayAttr->alwaysXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-always");
      if (XRayAttr->neverXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-never");
      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>()) {
        Fn->addFnAttr("xray-log-args",
                      llvm::utostr(LogArgs->getArgumentCount()));
      }
    } else {
      if (!CGM.imbueXRayAttrs(Fn, Loc))
        Fn->addFnAttr(
            "xray-instruction-threshold",
            llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
    }
  }

  // Add no-jump-tables value.
  Fn->addFnAttr("no-jump-tables",
                llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
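  // (The signature lets -fsanitize=function compare the declared type of an
  // indirect callee against the type used at the call site before the call.)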
  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
        // Remove any (C++17) exception specifications, to allow calling e.g. a
        // noexcept function through a non-noexcept pointer.
        auto ProtoTy =
            getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
                                                          EST_None);
        llvm::Constant *FTRTTIConst =
            CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
        llvm::Constant *FTRTTIConstEncoded =
            EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
        llvm::Constant *PrologueStructElems[] = {PrologueSig,
                                                 FTRTTIConstEncoded};
        llvm::Constant *PrologueStructConst =
            llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
        Fn->setPrologueData(PrologueStructConst);
      }
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability(getContext());
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  if (getLangOpts().CPlusPlus)
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (FD->isMain())
        Fn->addFnAttr(llvm::Attribute::NoRecurse);

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the
    // calling convention.
    CallingConv CC = CallingConv::CC_C;
    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
        CC = SrcFnTy->getCallConv();
    SmallVector<QualType, 16> ArgTypes;
    for (const VarDecl *VD : Args)
      ArgTypes.push_back(VD->getType());
    QualType FnType = getContext().getFunctionType(
        RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
                          Builder);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here would impact optimizations such as
  // function inlining, we just add an attribute to insert the mcount call in
  // the backend. The attribute is set to the mcount function name, which is
  // architecture dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if the function has
    // the no_instrument_function attribute.
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
      if (CGM.getCodeGenOpts().CallFEntry)
        Fn->addFnAttr("fentry-call", "true");
      else {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
    }
  }

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
    Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
    ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();

  // Emit OpenMP specific initialization of the device functions.
  if (getLangOpts().OpenMP && CurCodeDecl)
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);

  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either
        // by value or by reference, make sure CXXThisValue points to the
        // correct object.

        // Get the lvalue for the field (which is a copy of the enclosing
        // object or contains the address of the enclosing object).
        LValue ThisFieldLValue =
            EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address.
          CXXThisValue = ThisFieldLValue.getAddress().getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was
          // captured by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation())
                  .getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'?  The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }

    // Check the 'this' pointer once per function, if it's available.
    if (CXXABIThisValue) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
      QualType ThisTy = MD->getThisType(getContext());

      // If this is the call operator of a lambda with no capture-default, it
      // may have a static invoker function, which may call this operator with
      // a null 'this' pointer.
      if (isLambdaCallOperator(MD) &&
          MD->getParent()->getLambdaCaptureDefault() == LCD_None)
        SkippedChecks.set(SanitizerKind::Null, true);

      EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
                                                : TCK_MemberCall,
                    Loc, CXXABIThisValue, ThisTy,
                    getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
                    SkippedChecks);
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
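    // (For example, in 'void f(int n, int a[n])' the original type of 'a' is
    // the VLA type 'int[n]', whose size expression must be emitted here.)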
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
                                       const Stmt *Body) {
  incrementProfileCounter(Body);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it.  We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->isInterposable()) return;

  for (llvm::BasicBlock &BB : *F)
    for (llvm::Instruction &I : BB)
      if (I.mayThrow())
        return;

  F->setDoesNotThrow();
}

QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
                                               FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  QualType ResTy = FD->getReturnType();

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isInstance()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType(getContext());
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  // The base version of an inheriting constructor whose constructed base is a
  // virtual base is not passed any arguments (because it doesn't actually call
  // the inherited constructor).
  bool PassedParams = true;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    if (auto Inherited = CD->getInheritedConstructor())
      PassedParams =
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());

  if (PassedParams) {
    for (auto *Param : FD->parameters()) {
      Args.push_back(Param);
      if (!Param->hasAttr<PassObjectSizeAttr>())
        continue;

      auto *Implicit = ImplicitParamDecl::Create(
          getContext(), Param->getDeclContext(), Param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
      SizeArguments[Param] = Implicit;
      Args.push_back(Implicit);
    }
  }

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  return ResTy;
}

static bool
shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
                                             const ASTContext &Context) {
  QualType T = FD->getReturnType();
  // Avoid the optimization for functions that return a record type with a
  // trivial destructor or another trivially copyable type.
  if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      return !ClassDecl->hasTrivialDestructor();
  }
  return !T.isTriviallyCopyableType(Context);
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  CurGD = GD;

  FunctionArgList Args;
  QualType ResTy = BuildFunctionArgList(GD, Args);

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  // The function might not have a body if we're generating thunks for a
  // function declaration.
  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();
  else
    BodyRange = FD->getLocation();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  Stmt *Body = FD->getBody();

  // Initialize helper which will detect jumps which can cause invalid
  // lifetime markers.
  if (Body && ShouldEmitLifetimeMarkers)
    Bypasses.Init(Body);

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Generate the body of the function.
  PGO.assignRegionCounters(GD, CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !getLangOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
  else if (isa<CXXMethodDecl>(FD) &&
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else if (Body) {
    EmitFunctionBody(Args, Body);
  } else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior
  //   in a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    bool ShouldEmitUnreachable =
        CGM.getCodeGenOpts().StrictReturn ||
        shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
    if (SanOpts.has(SanitizerKind::Return)) {
      SanitizerScope SanScope(this);
      llvm::Value *IsFalse = Builder.getFalse();
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
                SanitizerHandler::MissingReturn,
                EmitCheckSourceLocation(FD->getLocation()), None);
    } else if (ShouldEmitUnreachable) {
      if (CGM.getCodeGenOpts().OptimizationLevel == 0)
        EmitTrapCall(llvm::Intrinsic::trap);
    }
    if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
      Builder.CreateUnreachable();
      Builder.ClearInsertionPoint();
    }
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, then not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) {  ...  foo:  bar(); }  goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we
  // have to emit the code.
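  // (e.g. 'switch (n) { if (0) { case 1: work(); } }' must still emit the
  // body, because 'case 1' is reachable from the enclosing switch.)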
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (const Stmt *SubStmt : S->children())
    if (ContainsLabel(SubStmt, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (const Stmt *SubStmt : S->children())
    if (containsBreak(SubStmt))
      return true;

  return false;
}

bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
  if (!S) return false;

  // Some statement kinds add a scope and thus never add a decl to the current
  // scope. Note, this list is longer than the list of statements that might
  // have an unscoped decl nested within them, but this way is conservatively
  // correct even if more statement kinds are added.
  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
      isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
      isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
      isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
    return false;

  if (isa<DeclStmt>(S))
    return true;

  for (const Stmt *SubStmt : S->children())
    if (mightAddDeclToScope(SubStmt))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the boolean result in Result.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool,
                                                   bool AllowLabels) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   llvm::APSInt &ResultInt,
                                                   bool AllowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  llvm::APSInt Int;
  if (!Cond->EvaluateAsInt(Int, getContext()))
    return false;  // Not foldable, not integer or not fully evaluatable.

  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Int;
  return true;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.
/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
/// if statement) to the specified blocks.  Based on the condition, this might
/// try to simplify the emitted code for the conditional, e.g. by folding away
/// constant operands of && and || and short-circuiting straight to the target
/// blocks.
///
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock,
                                           uint64_t TrueCount) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {

    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      // The counter tells us how often we evaluate RHS, and all of TrueCount
      // can be propagated to that branch.
      uint64_t RHSCount = getProfileCount(CondBOp->getRHS());

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
        EmitBlock(LHSTrue);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      // We have the count for entry to the RHS and for the whole expression
      // being true, so we can divvy up the True count between the short
      // circuit and the RHS.
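      // Worked example with hypothetical counts: if 'a || b' is reached 10
      // times and the RHS is entered 4 times, the LHS must have been true
      // 10 - 4 = 6 times; if the whole expression was true 9 times, the
      // remaining 9 - 6 = 3 true outcomes came from the RHS.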
      uint64_t LHSCount =
          getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
      uint64_t RHSCount = TrueCount - LHSCount;

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
        EmitBlock(LHSFalse);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);

      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot) {
      // Negate the count.
      uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
      // Negate the condition and swap the destination blocks.
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
                                  FalseCount);
    }
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
                         getProfileCount(CondOp));

    // When computing PGO branch weights, we only know the overall count for
    // the true block.  This code is essentially doing tail duplication of the
    // naive code-gen, introducing new edges for which counts are not
    // available.  Divide the counts proportionally between the LHS and RHS of
    // the conditional operator.
    uint64_t LHSScaledTrueCount = 0;
    if (TrueCount) {
      double LHSRatio =
          getProfileCount(CondOp) / (double)getCurrentProfileCount();
      LHSScaledTrueCount = TrueCount * LHSRatio;
    }

    cond.begin(*this);
    EmitBlock(LHSBlock);
    incrementProfileCounter(CondOp);
    {
      ApplyDebugLocation DL(*this, Cond);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
                           LHSScaledTrueCount);
    }
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
                         TrueCount - LHSScaledTrueCount);
    cond.end(*this);

    return;
  }

  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }

  // If the branch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the branch is unpredictable.
  // Don't bother if not optimizing, because that metadata would not be used.
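  // For example (illustrative): 'if (__builtin_unpredictable(x > 0)) ...'
  // lowers to a conditional branch carrying !unpredictable metadata, which
  // tells the optimizer and the backend not to assume either direction is
  // likely.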
  llvm::MDNode *Unpredictable = nullptr;
  auto *Call = dyn_cast<CallExpr>(Cond);
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
      llvm::MDBuilder MDHelper(getLLVMContext());
      Unpredictable = MDHelper.createUnpredictable();
    }
  }

  // Create branch weights based on the number of times we get here and the
  // number of times the condition should be true.
  uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
  llvm::MDNode *Weights =
      createProfileWeights(TrueCount, CurrentCount - TrueCount);

  // Emit the code with the fully general case.
  llvm::Value *CondV;
  {
    ApplyDebugLocation DL(*this, Cond);
    CondV = EvaluateExprAsBool(Cond);
  }
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
  CGM.ErrorUnsupported(S, Type);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType - the innermost element type of the array
/// \param src - a char* pointing to the bit-pattern for a single
///   base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               Address dest, Address src,
                               llvm::Value *sizeInChars) {
  CGBuilderTy &Builder = CGF.Builder;

  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());

  Address begin =
    Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
  llvm::Value *end =
    Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
  cur->addIncoming(begin.getPointer(), originBB);

  CharUnits curAlign =
    dest.getAlignment().alignmentOfArrayElement(baseSize);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

void
CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
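  // For example (illustrative): given 'struct E {};', an E object has size 1
  // but holds no data that needs clearing, and as an empty base class its
  // storage can overlap other members, where a memset would clobber live
  // data.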
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (DestPtr.getElementType() != Int8Ty)
    DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);

  // Get size and alignment info for this aggregate.
  CharUnits size = getContext().getTypeSizeInChars(Ty);

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
          dyn_cast_or_null<VariableArrayType>(
                                          getContext().getAsArrayType(Ty))) {
      auto VlaSize = getVLASize(vlaType);
      SizeVal = VlaSize.NumElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(size);
    vla = nullptr;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    CharUnits NullAlign = DestPtr.getAlignment();
    NullVariable->setAlignment(NullAlign.getQuantity());
    Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
                   NullAlign);

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
}

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (!IndirectBranch)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its
  // block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
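  // Schematically, the shared block ends up looking like this (illustrative
  // IR; the incoming values and destination labels are assumed):
  //   indirectgoto:
  //     %indirect.goto.dest = phi i8* [ %addr, %somebb ], ...
  //     indirectbr i8* %indirect.goto.dest, [label %L1, label %L2, ...]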
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              Address &addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size.  Note that this is the
  // number of VLA elements (the product of all the VLA dimensions), not a
  // size in bytes.
  llvm::Value *numVLAElements = nullptr;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;

    // Walk into all VLAs.  This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
    dyn_cast<llvm::ArrayType>(addr.getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out of sync");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct).  Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    llvm::Type *baseType = ConvertType(eltType);
    addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
                                             gepIndices, "array.begin"),
                   addr.getAlignment());
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
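  // Worked example (illustrative): for 'int a[n][m][4][2]', numVLAElements is
  // n*m from the VLA walk above, countFromCLAs is 4*2 = 8, and the result
  // below is n*m*8 elements of base type 'int'.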
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}

CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = nullptr;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return { numElements, elementType };
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLAElements1D(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLAElements1D(vla);
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
  assert(VlaSize && "no size for VLA!");
  assert(VlaSize->getType() == SizeTy);
  return { VlaSize, Vla->getElementType() };
}

void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVariablyModifiedType!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

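    // The walk below strips one level of the type per iteration; e.g.
    // (illustrative) for 'int (*p)[n]' it steps Pointer -> VariableArray and
    // records the size expression 'n' in VLASizeMap once.
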
    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCTypeParam:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a
          //   value greater than zero.
          if (SanOpts.has(SanitizerKind::VLABound) &&
              size->getType()->isSignedIntegerType()) {
            SanitizerScope SanScope(this);
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
              EmitCheckSourceLocation(size->getLocStart()),
              EmitCheckTypeDescriptor(size->getType())
            };
            EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
                                     SanitizerKind::VLABound),
                      SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
          }

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
    case Type::PackExpansion:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
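      // For example (illustrative): with 'int (*p)[n]' and 'int i', the
      // declaration 'typeof(p + i++) q;' reaches this case; the underlying
      // expression 'p + i++' is emitted below, so the increment of 'i' still
      // occurs even though only the expression's type is used.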
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;

    case Type::Pipe:
      type = cast<PipeType>(ty)->getElementType();
      break;
    }
  } while (type->isVariablyModifiedType());
}

Address CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitPointerWithAlignment(E);
  return EmitLValue(E).getAddress();
}

Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
  return EmitLValue(E).getAddress();
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              const APValue &Init) {
  assert(!Init.isUninit() && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}

llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME: We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
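  // Each annotation lowers to a call like this (illustrative IR; the i8*
  // arguments are the annotated value, the annotation string, and the file
  // name, followed by the line number):
  //   call void @llvm.var.annotation(i8* %v, i8* %ann, i8* %file, i32 42)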
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       I->getAnnotation(), D->getLocation());
}

Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                              Address Addr) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Value *V = Addr.getPointer();
  llvm::Type *VTy = V->getType();
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME: Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return Address(V, Addr.getAlignment());
}

CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }

CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
    : CGF(CGF) {
  assert(!CGF->IsSanitizerScope);
  CGF->IsSanitizerScope = true;
}

CodeGenFunction::SanitizerScope::~SanitizerScope() {
  CGF->IsSanitizerScope = false;
}

void CodeGenFunction::InsertHelper(llvm::Instruction *I,
                                   const llvm::Twine &Name,
                                   llvm::BasicBlock *BB,
                                   llvm::BasicBlock::iterator InsertPt) const {
  LoopStack.InsertHelper(I);
  if (IsSanitizerScope)
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
}

void CGBuilderInserter::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const {
  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
  if (CGF)
    CGF->InsertHelper(I, Name, BB, InsertPt);
}

static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
                                CodeGenModule &CGM, const FunctionDecl *FD,
                                std::string &FirstMissing) {
  // If there aren't any required features listed then go ahead and return.
  if (ReqFeatures.empty())
    return false;

  // Now build up the set of caller features and verify that all the required
  // features are there.
  llvm::StringMap<bool> CallerFeatureMap;
  CGM.getFunctionFeatureMap(CallerFeatureMap, FD);

  // Return true only if every comma-separated required entry is satisfied;
  // within an entry, '|' separates alternatives, of which the caller needs
  // at least one.
  return std::all_of(
      ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
        SmallVector<StringRef, 1> OrFeatures;
        Feature.split(OrFeatures, "|");
        return std::any_of(OrFeatures.begin(), OrFeatures.end(),
                           [&](StringRef Feature) {
                             if (!CallerFeatureMap.lookup(Feature)) {
                               FirstMissing = Feature.str();
                               return false;
                             }
                             return true;
                           });
      });
}

// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
                                          const FunctionDecl *TargetDecl) {
  // Early exit if this is an indirect call.
  if (!TargetDecl)
    return;
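  // For example (illustrative): calling the SSE4.2 builtin
  // __builtin_ia32_crc32qi from a caller compiled without sse4.2 (and without
  // __attribute__((target("sse4.2")))) is diagnosed here rather than being
  // allowed to produce an instruction the target cannot execute.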
  // Get the current enclosing function if it exists.  If it doesn't
  // we can't check the target features anyhow.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl);
  if (!FD)
    return;

  // Grab the required features for the call.  For a builtin this is listed
  // in the .td file with the default cpu; for an always_inline function it
  // is any listed cpu and any listed features.
  unsigned BuiltinID = TargetDecl->getBuiltinID();
  std::string MissingFeature;
  if (BuiltinID) {
    SmallVector<StringRef, 1> ReqFeatures;
    const char *FeatureList =
        CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
    // Return if the builtin doesn't have any required features.
    if (!FeatureList || StringRef(FeatureList) == "")
      return;
    StringRef(FeatureList).split(ReqFeatures, ",");
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(E->getLocStart(), diag::err_builtin_needs_feature)
          << TargetDecl->getDeclName()
          << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);

  } else if (TargetDecl->hasAttr<TargetAttr>()) {
    // Get the required features for the callee.
    SmallVector<StringRef, 1> ReqFeatures;
    llvm::StringMap<bool> CalleeFeatureMap;
    CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
    for (const auto &F : CalleeFeatureMap) {
      // Only positive features are "required".
      if (F.getValue())
        ReqFeatures.push_back(F.getKey());
    }
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(E->getLocStart(), diag::err_function_needs_feature)
          << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
  }
}

void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
  if (!CGM.getCodeGenOpts().SanitizeStats)
    return;

  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
  CGM.getSanStats().create(IRB, SSK);
}

llvm::Value *
CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
  llvm::Value *TrueCondition = nullptr;
  if (!RO.ParsedAttribute.Architecture.empty())
    TrueCondition = EmitX86CpuIs(RO.ParsedAttribute.Architecture);

  if (!RO.ParsedAttribute.Features.empty()) {
    SmallVector<StringRef, 8> FeatureList;
    llvm::for_each(RO.ParsedAttribute.Features,
                   [&FeatureList](const std::string &Feature) {
                     FeatureList.push_back(StringRef{Feature}.substr(1));
                   });
    llvm::Value *FeatureCmp = EmitX86CpuSupports(FeatureList);
    TrueCondition = TrueCondition ? Builder.CreateAnd(TrueCondition, FeatureCmp)
                                  : FeatureCmp;
  }
  return TrueCondition;
}

void CodeGenFunction::EmitMultiVersionResolver(
    llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
  assert((getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86 ||
          getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86_64) &&
         "Only implemented for x86 targets");

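  // Schematically, for versions 'f.avx2' and a default 'f', the resolver
  // emitted below looks like this (illustrative IR; names assumed):
  //   entry:
  //     call void @__cpu_indicator_init()
  //     br i1 %avx2.supported, label %ro_ret, label %ro_else
  //   ro_ret:
  //     ret i32 ()* @f.avx2
  //   ro_else:
  //     ret i32 ()* @f
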
  // The resolver function's entry block.
  llvm::BasicBlock *CurBlock = createBasicBlock("entry", Resolver);
  Builder.SetInsertPoint(CurBlock);
  EmitX86CpuInit();

  llvm::Function *DefaultFunc = nullptr;
  for (const MultiVersionResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);
    llvm::Value *TrueCondition = FormResolverCondition(RO);

    if (!TrueCondition) {
      DefaultFunc = RO.Function;
    } else {
      llvm::BasicBlock *RetBlock = createBasicBlock("ro_ret", Resolver);
      llvm::IRBuilder<> RetBuilder(RetBlock);
      RetBuilder.CreateRet(RO.Function);
      CurBlock = createBasicBlock("ro_else", Resolver);
      Builder.CreateCondBr(TrueCondition, RetBlock, CurBlock);
    }
  }

  assert(DefaultFunc && "No default version?");
  // Emit the return of the default version from the final 'else' block.
  Builder.SetInsertPoint(CurBlock);
  Builder.CreateRet(DefaultFunc);
}

llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
  if (CGDebugInfo *DI = getDebugInfo())
    return DI->SourceLocToDebugLoc(Location);

  return llvm::DebugLoc();
}