//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCleanup.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Disable lifetime markers in msan builds.
  // FIXME: Remove this when msan works with lifetime markers.
  if (LangOpts.Sanitize.has(SanitizerKind::Memory))
    return false;

  // Asan uses markers for use-after-scope checks.
  if (CGOpts.SanitizeAddressUseAfterScope)
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
      PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
                    CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  llvm::FastMathFlags FMF;
  if (CGM.getLangOpts().FastMath)
    FMF.setFast();
  if (CGM.getLangOpts().FiniteMathOnly) {
    FMF.setNoNaNs();
    FMF.setNoInfs();
  }
  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
    FMF.setNoNaNs();
  }
  if (CGM.getCodeGenOpts().NoSignedZeros) {
    FMF.setNoSignedZeros();
  }
  if (CGM.getCodeGenOpts().ReciprocalMath) {
    FMF.setAllowReciprocal();
  }
  if (CGM.getCodeGenOpts().Reassociate) {
    FMF.setAllowReassoc();
  }
  Builder.setFastMathFlags(FMF);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  // If there are any unclaimed block infos, go ahead and destroy them
  // now. This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);
}

CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(
    QualType T, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) {
  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
                                 /* forPointeeType= */ true);
}

CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
                                                   LValueBaseInfo *BaseInfo,
                                                   TBAAAccessInfo *TBAAInfo,
                                                   bool forPointeeType) {
  if (TBAAInfo)
    *TBAAInfo = CGM.getTBAAAccessInfo(T);

  // Honor alignment typedef attributes even on incomplete types.
  // We also honor them straight for C++ class types, even as pointees;
  // there's an expressivity gap here.
  if (auto TT = T->getAs<TypedefType>()) {
    if (auto Align = TT->getDecl()->getMaxAlignment()) {
      if (BaseInfo)
        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
      return getContext().toCharUnitsFromBits(Align);
    }
  }

  if (BaseInfo)
    *BaseInfo = LValueBaseInfo(AlignmentSource::Type);

  CharUnits Alignment;
  if (T->isIncompleteType()) {
    Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
  } else {
    // For C++ class pointees, we don't know whether we're pointing at a
    // base or a complete object, so we generally need to use the
    // non-virtual alignment.
    const CXXRecordDecl *RD;
    if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
      Alignment = CGM.getClassPointerAlignment(RD);
    } else {
      Alignment = getContext().getTypeAlignInChars(T);
      if (T.getQualifiers().hasUnaligned())
        Alignment = CharUnits::One();
    }

    // Cap to the global maximum type alignment unless the alignment
    // was somehow explicit on the type.
    if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
      if (Alignment.getQuantity() > MaxAlign &&
          !getContext().isAlignmentRequired(T))
        Alignment = CharUnits::fromQuantity(MaxAlign);
    }
  }
  return Alignment;
}

LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
                          TBAAInfo);
}

/// Given a value of type T* that may not point to a complete object,
/// construct an l-value with the natural pointee alignment of T.
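/// For example (illustrative): a 'Base *' may address a base-class subobject
/// embedded inside a larger Derived object, so the complete-object alignment
/// of Base cannot be assumed; getNaturalTypeAlignment is therefore called
/// with forPointeeType=true to get the conservative non-virtual alignment.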
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                            /* forPointeeType= */ true);
  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point; reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be
      // used later by the actual 'ret' instruction.
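      // (Illustrative) This folds the common single-predecessor shape
      //   entry:  ... ; br label %return
      //   return: ret ...
      // by deleting the branch and resuming emission in its parent block.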
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point; there is no reason to emit the
  // block unless it has uses. However, we still need a place to put the
  // debug region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0 &&
                               NumSimpleReturnExprs == NumReturnExprs &&
                               ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo())
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end
  // of the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should
    // be no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal; zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
           I = DeferredReplacements.begin(),
           E = DeferredReplacements.end();
       I != E; ++I) {
    I->first->replaceAllUsesWith(I->second);
    I->first->eraseFromParent();
  }

  // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in a slight increase in the number of
  // instructions if compiled with no optimizations. We do it for coroutines
  // because the lifetime of the CleanupDestSlot alloca makes correct
  // coroutine frame building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth = std::max(LargestVectorWidth,
                                    VT->getPrimitiveSizeInBits());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth = std::max(LargestVectorWidth,
                                  VT->getPrimitiveSizeInBits());

  // Add the required-vector-width attribute. This contains the max width from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by
  //    this function.
  CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));

  // If we generated an unreachable return block, delete it now.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  if (ReturnValue.isValid()) {
    auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      ReturnValue = Address::invalid();
    }
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// ShouldXRayInstrumentFunction - Return true if the current function should
/// be instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

llvm::Constant *
CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
                                            llvm::Constant *Addr) {
  // Addresses stored in prologue data can't require run-time fixups and must
  // be PC-relative. Run-time fixups are undesirable because they necessitate
  // writable text segments, which are unsafe. And absolute addresses are
  // undesirable because they break PIE mode.

  // Add a layer of indirection through a private global. Taking its address
  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
                                      /*isConstant=*/true,
                                      llvm::GlobalValue::PrivateLinkage, Addr);

  // Create a PC-relative address.
  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
  return (IntPtrTy == Int32Ty)
             ? PCRelAsInt
             : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
}

llvm::Value *
CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
                                          llvm::Value *EncodedAddr) {
  // Reconstruct the address of the global.
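  // (Illustrative) EncodeAddrForUseInPrologue stored the 32-bit offset
  // GV - F, so the global's address is recovered as sext(offset) + F,
  // and the original Addr is then loaded back out of the global.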
  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");

  // Load the original pointer through the global.
  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
                            "decoded_addr");
}

void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn) {
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  CGM.GenOpenCLArgMetadata(Fn, FD, this);

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

/// Determine whether the function F ends with a return stmt.
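/// Only a ReturnStmt that is lexically last in the function's outermost
/// CompoundStmt counts. For example (illustrative), 'int f() { return 0; }'
/// ends with a return, but 'int g() { { return 0; } }' does not, because the
/// return sits inside a nested compound statement.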
static bool endsWithReturn(const Decl *F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD,
                                    QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
    if (FD->usesSEHTry())
      CurSEHParent = FD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function has been blacklisted for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                         \
    if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc))                \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (0);

  if (D) {
    // Apply the no_sanitize* attributes to SanOpts.
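    // e.g. (illustrative) __attribute__((no_sanitize("address"))) on the
    // declaration clears SanitizerKind::Address here, and the pairing logic
    // below also clears the corresponding kernel variant (and vice versa).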
    for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
      SanitizerMask mask = Attr->getMask();
      SanOpts.Mask &= ~mask;
      if (mask & SanitizerKind::Address)
        SanOpts.set(SanitizerKind::KernelAddress, false);
      if (mask & SanitizerKind::KernelAddress)
        SanOpts.set(SanitizerKind::Address, false);
      if (mask & SanitizerKind::HWAddress)
        SanOpts.set(SanitizerKind::KernelHWAddress, false);
      if (mask & SanitizerKind::KernelHWAddress)
        SanOpts.set(SanitizerKind::HWAddress, false);
    }
  }

  // Apply sanitizer attributes to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
  if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
                       SanitizerKind::KernelHWAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
  if (SanOpts.has(SanitizerKind::MemTag))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
  if (SanOpts.has(SanitizerKind::Thread))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);
  if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run
  // time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() &&
           II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on
  // the namespace because not all allocators are in std::.
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Ignore null checks in coroutine functions since the coroutines passes
  // are not aware of how to move the extra UBSan instructions across the
  // split coroutine boundaries.
  if (D && SanOpts.has(SanitizerKind::Null))
    if (const auto *FD = dyn_cast<FunctionDecl>(D))
      if (FD->getBody() &&
          FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
        SanOpts.Mask &= ~SanitizerKind::Null;

  // Apply xray attributes to the function (as a string, for now).
  if (D) {
    if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
      if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
              XRayInstrKind::Function)) {
        if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
          Fn->addFnAttr("function-instrument", "xray-always");
        if (XRayAttr->neverXRayInstrument())
          Fn->addFnAttr("function-instrument", "xray-never");
        if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
          if (ShouldXRayInstrumentFunction())
            Fn->addFnAttr("xray-log-args",
                          llvm::utostr(LogArgs->getArgumentCount()));
      }
    } else {
      if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
        Fn->addFnAttr(
            "xray-instruction-threshold",
            llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
    }
  }

  // Add no-jump-tables value.
  Fn->addFnAttr("no-jump-tables",
                llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
    Fn->addFnAttr("cfi-canonical-jump-table");

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
        // Remove any (C++17) exception specifications, to allow calling e.g. a
        // noexcept function through a non-noexcept pointer.
        auto ProtoTy =
            getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
                                                          EST_None);
        llvm::Constant *FTRTTIConst =
            CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
        llvm::Constant *FTRTTIConstEncoded =
            EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
        llvm::Constant *PrologueStructElems[] = {PrologueSig,
                                                 FTRTTIConstEncoded};
        llvm::Constant *PrologueStructConst =
            llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
        Fn->setPrologueData(PrologueStructConst);
      }
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability(getContext());
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  if (getLangOpts().CPlusPlus)
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (FD->isMain())
        Fn->addFnAttr(llvm::Attribute::NoRecurse);

  // If a custom alignment is used, force realigning to this alignment on
  // any main function which certainly will need it.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
        CGM.getCodeGenOpts().StackAlignment)
      Fn->addFnAttr("stackrealign");

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the
    // calling convention.
    CallingConv CC = CallingConv::CC_C;
    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
        CC = SrcFnTy->getCallConv();
    SmallVector<QualType, 16> ArgTypes;
    for (const VarDecl *VD : Args)
      ArgTypes.push_back(VD->getType());
    QualType FnType = getContext().getFunctionType(
        RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
                          Builder);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here impacts optimizations such as
  // function inlining, we just add an attribute to insert an mcount call in
  // the backend. The attribute value is the mcount function name, which is
  // architecture-dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if the function has
    // the no_instrument_function attribute.
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
      if (CGM.getCodeGenOpts().CallFEntry)
        Fn->addFnAttr("fentry-call", "true");
      else {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
    }
  }

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
    // Indirect return; emit the returned value directly into the sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
    if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
      ReturnValuePointer =
          CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
      Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
                              ReturnValue.getPointer(), Int8PtrTy),
                          ReturnValuePointer);
    }
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
    ReturnValuePointer = Address(Addr, getPointerAlign());
    Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
    ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result. We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();

  // Emit OpenMP specific initialization of the device functions.
  if (getLangOpts().OpenMP && CurCodeDecl)
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);

  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this', either
        // by value or by reference, make sure CXXThisValue points to the
        // correct object.

        // Get the lvalue for the field (which is a copy of the enclosing
        // object or contains the address of the enclosing object).
        LValue ThisFieldLValue =
            EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address.
          CXXThisValue = ThisFieldLValue.getAddress().getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was
          // captured by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation())
                  .getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }

    // Check the 'this' pointer once per function, if it's available.
    if (CXXABIThisValue) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
      QualType ThisTy = MD->getThisType();

      // If this is the call operator of a lambda with no capture-default, it
      // may have a static invoker function, which may call this operator with
      // a null 'this' pointer.
      if (isLambdaCallOperator(MD) &&
          MD->getParent()->getLambdaCaptureDefault() == LCD_None)
        SkippedChecks.set(SanitizerKind::Null, true);

      EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
                                                : TCK_MemberCall,
                    Loc, CXXABIThisValue, ThisTy,
                    getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
                    SkippedChecks);
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);

  // TODO: Do we need to handle this in two places like we do with
  // target-features/target-cpu?
  if (CurFuncDecl)
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
      LargestVectorWidth = VecWidth->getVectorWidth();
}

void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
  incrementProfileCounter(Body);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
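    // (Illustrative) The resulting CFG is:
    //   <fallthrough pred> --> skipcount
    //   BB: <counter increment for S> --> skipcount
    // so the counter in BB counts only explicit jumps to BB; the saved
    // fall-through count is added back via setCurrentProfileCount below.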
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->isInterposable()) return;

  for (llvm::BasicBlock &BB : *F)
    for (llvm::Instruction &I : BB)
      if (I.mayThrow())
        return;

  F->setDoesNotThrow();
}

QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
                                               FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  QualType ResTy = FD->getReturnType();

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isInstance()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType();
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  // The base version of an inheriting constructor whose constructed base is a
  // virtual base is not passed any arguments (because it doesn't actually
  // call the inherited constructor).
  bool PassedParams = true;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    if (auto Inherited = CD->getInheritedConstructor())
      PassedParams =
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());

  if (PassedParams) {
    for (auto *Param : FD->parameters()) {
      Args.push_back(Param);
      if (!Param->hasAttr<PassObjectSizeAttr>())
        continue;

      auto *Implicit = ImplicitParamDecl::Create(
          getContext(), Param->getDeclContext(), Param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
      SizeArguments[Param] = Implicit;
      Args.push_back(Implicit);
    }
  }

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  return ResTy;
}

static bool
shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
                                             const ASTContext &Context) {
  QualType T = FD->getReturnType();
  // Avoid the optimization for functions that return a record type with a
  // trivial destructor or another trivially copyable type.
  if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      return !ClassDecl->hasTrivialDestructor();
  }
  return !T.isTriviallyCopyableType(Context);
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  CurGD = GD;

  FunctionArgList Args;
  QualType ResTy = BuildFunctionArgList(GD, Args);

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  // The function might not have a body if we're generating thunks for a
  // function declaration.
  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();
  else
    BodyRange = FD->getLocation();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  Stmt *Body = FD->getBody();

  // Initialize the helper which will detect jumps that can cause invalid
  // lifetime markers.
  if (Body && ShouldEmitLifetimeMarkers)
    Bypasses.Init(Body);

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Generate the body of the function.
  PGO.assignRegionCounters(GD, CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !getLangOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
  else if (isa<CXXMethodDecl>(FD) &&
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else if (Body) {
    EmitFunctionBody(Body);
  } else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior
  //   in a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
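  // (Illustrative) For 'int f(bool b) { if (b) return 1; }', falling off
  // the end when b is false is UB; depending on the options checked below we
  // emit a ubsan missing-return check, a trap, or a bare 'unreachable'.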
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    bool ShouldEmitUnreachable =
        CGM.getCodeGenOpts().StrictReturn ||
        shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
    if (SanOpts.has(SanitizerKind::Return)) {
      SanitizerScope SanScope(this);
      llvm::Value *IsFalse = Builder.getFalse();
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
                SanitizerHandler::MissingReturn,
                EmitCheckSourceLocation(FD->getLocation()), None);
    } else if (ShouldEmitUnreachable) {
      if (CGM.getCodeGenOpts().OptimizationLevel == 0)
        EmitTrapCall(llvm::Intrinsic::trap);
    }
    if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
      Builder.CreateUnreachable();
      Builder.ClearInsertionPoint();
    }
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it. If
/// this statement is not executed normally, then not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a label, we have to emit the code; consider something like:
  //   if (0) { ... foo: bar(); } goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we
  // have to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (const Stmt *SubStmt : S->children())
    if (ContainsLabel(SubStmt, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, not a break!
  if (!S) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (const Stmt *SubStmt : S->children())
    if (containsBreak(SubStmt))
      return true;

  return false;
}

bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
  if (!S) return false;

  // Some statement kinds add a scope and thus never add a decl to the
  // current scope.
  // Note that this list is longer than the list of statements that might
  // have an unscoped decl nested within them, but this way is conservatively
  // correct even if more statement kinds are added.
  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
      isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
      isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
      isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
    return false;

  if (isa<DeclStmt>(S))
    return true;

  for (const Stmt *SubStmt : S->children())
    if (mightAddDeclToScope(SubStmt))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds, return true and set the boolean result in ResultBool.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool,
                                                   bool AllowLabels) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds, return true and set the folded value in ResultInt.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   llvm::APSInt &ResultInt,
                                                   bool AllowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult Result;
  if (!Cond->EvaluateAsInt(Result, getContext()))
    return false; // Not foldable, not integer or not fully evaluatable.

  llvm::APSInt Int = Result.Val.getInt();
  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
    return false; // Contains a label.

  ResultInt = Int;
  return true;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
/// if statement) to the specified blocks. Based on the condition, this might
/// try to simplify the codegen of the conditional based on the branch.
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock,
                                           uint64_t TrueCount) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {

    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code. "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional. If the LHS conditional is false, we
      // want to jump to the FalseBlock.
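      // (Illustrative) br(X && Y, T, F) therefore lowers to:
      //   br(X, land.lhs.true, F)
      //   land.lhs.true:
      //     br(Y, T, F)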
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      // The counter tells us how often we evaluate RHS, and all of TrueCount
      // can be propagated to that branch.
      uint64_t RHSCount = getProfileCount(CondBOp->getRHS());

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
        EmitBlock(LHSTrue);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code. "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional. If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      // We have the count for entry to the RHS and for the whole expression
      // being true, so we can divvy up the True count between the short
      // circuit and the RHS.
      uint64_t LHSCount =
          getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
      uint64_t RHSCount = TrueCount - LHSCount;

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
        EmitBlock(LHSFalse);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);

      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot) {
      // Negate the count.
      uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
      // Negate the condition and swap the destination blocks.
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
                                  FalseCount);
    }
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
                         getProfileCount(CondOp));

    // When computing PGO branch weights, we only know the overall count for
    // the true block. This code is essentially doing tail duplication of the
    // naive code-gen, introducing new edges for which counts are not
    // available. Divide the counts proportionally between the LHS and RHS of
    // the conditional operator.
    uint64_t LHSScaledTrueCount = 0;
    if (TrueCount) {
      double LHSRatio =
          getProfileCount(CondOp) / (double)getCurrentProfileCount();
      LHSScaledTrueCount = TrueCount * LHSRatio;
    }

    cond.begin(*this);
    EmitBlock(LHSBlock);
    incrementProfileCounter(CondOp);
    {
      ApplyDebugLocation DL(*this, Cond);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
                           LHSScaledTrueCount);
    }
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
                         TrueCount - LHSScaledTrueCount);
    cond.end(*this);

    return;
  }

  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }

  // If the branch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the branch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
  llvm::MDNode *Unpredictable = nullptr;
  auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
      llvm::MDBuilder MDHelper(getLLVMContext());
      Unpredictable = MDHelper.createUnpredictable();
    }
  }

  // Create branch weights based on the number of times we get here and the
  // number of times the condition should be true.
  uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
  llvm::MDNode *Weights =
      createProfileWeights(TrueCount, CurrentCount - TrueCount);

  // Emit the code with the fully general case.
  llvm::Value *CondV;
  {
    ApplyDebugLocation DL(*this, Cond);
    CondV = EvaluateExprAsBool(Cond);
  }
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
}

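// Usage sketch (hypothetical source, abridged IR) for the
// __builtin_unpredictable path handled above: a condition like
//
//   if (__builtin_unpredictable(x > 0)) { ... }
//
// yields a conditional branch annotated with "!unpredictable" metadata,
//
//   br i1 %cmp, label %if.then, label %if.end, !unpredictable !N
//
// which tells later passes not to apply branch-prediction heuristics to it.
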
/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
  CGM.ErrorUnsupported(S, Type);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType the inner-most element type of the array
/// \param src - a char* pointing to the bit-pattern for a single
///   base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               Address dest, Address src,
                               llvm::Value *sizeInChars) {
  CGBuilderTy &Builder = CGF.Builder;

  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());

  Address begin =
    Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
  llvm::Value *end =
    Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA. C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
  cur->addIncoming(begin.getPointer(), originBB);

  CharUnits curAlign =
    dest.getAlignment().alignmentOfArrayElement(baseSize);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

void
CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (DestPtr.getElementType() != Int8Ty)
    DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);

  // Get size and alignment info for this aggregate.
  CharUnits size = getContext().getTypeSizeInChars(Ty);

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
            dyn_cast_or_null<VariableArrayType>(
                getContext().getAsArrayType(Ty))) {
      auto VlaSize = getVLASize(vlaType);
      SizeVal = VlaSize.NumElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(size);
    vla = nullptr;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
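  // As a concrete illustration (Itanium C++ ABI): a null pointer to data
  // member is the all-ones value -1, not 0, so for a hypothetical
  //   struct S { int S::*mp; };
  // "zero"-initializing an S via memset(0) would yield a *non-null* member
  // pointer (offset 0); the path below instead copies from a global holding
  // the correct null bit-pattern.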
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
        new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                                 /*isConstant=*/true,
                                 llvm::GlobalVariable::PrivateLinkage,
                                 NullConstant, Twine());
    CharUnits NullAlign = DestPtr.getAlignment();
    NullVariable->setAlignment(NullAlign.getQuantity());
    Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
                   NullAlign);

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
    return;
  }

  // Otherwise, just memset the whole thing to zero. This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
}

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (!IndirectBranch)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              Address &addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size. Note that
  // this is the size of the VLA in bytes, not its size in elements.
  llvm::Value *numVLAElements = nullptr;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;

    // Walk into all VLAs. This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

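  // A hedged walkthrough of the constant-length case below: for a local
  // "int a[2][3]" (hypothetical), addr has LLVM type [2 x [3 x i32]]*, the
  // loop gathers GEP indices (0, 0, 0), countFromCLAs becomes 2 * 3 = 6,
  // and addr is rewritten to point at the first i32 element. (Types shown
  // as pre-opaque-pointer IR.)
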
  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
      dyn_cast<llvm::ArrayType>(addr.getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
        dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out of sync");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    llvm::Type *baseType = ConvertType(eltType);
    addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
                                             gepIndices, "array.begin"),
                   addr.getAlignment());
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}

CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

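// An informal example for the overload below: given "int a[n][m]"
// (hypothetical names), VLASizeMap holds the previously-emitted values of
// 'n' and 'm', and the loop folds them into one element count n * m,
// flagged NUW since a wrapping VLA bound is undefined behavior; the
// returned element type is 'int'.
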
CodeGenFunction::VlaSizePair
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = nullptr;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return { numElements, elementType };
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLAElements1D(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLAElements1D(vla);
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
  assert(VlaSize && "no size for VLA!");
  assert(VlaSize->getType() == SizeTy);
  return { VlaSize, Vla->getElementType() };
}

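// An informal example of what EmitVariablyModifiedType (below) walks: in
//
//   void f(int n) { int (*p)[n]; }   // hypothetical
//
// the declaration of 'p' has a variably modified type; the walk steps
// through the pointer to the VariableArrayType, emits the size expression
// 'n' exactly once, and caches the result in VLASizeMap so later uses of
// the same type reuse the same size value.
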
void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCTypeParam:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a
          //   value greater than zero.
          if (SanOpts.has(SanitizerKind::VLABound) &&
              size->getType()->isSignedIntegerType()) {
            SanitizerScope SanScope(this);
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
                EmitCheckSourceLocation(size->getBeginLoc()),
                EmitCheckTypeDescriptor(size->getType())};
            EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
                                     SanitizerKind::VLABound),
                      SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
          }

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
    case Type::PackExpansion:
    case Type::MacroQualified:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;

    case Type::Pipe:
      type = cast<PipeType>(ty)->getElementType();
      break;
    }
  } while (type->isVariablyModifiedType());
}

Address CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitPointerWithAlignment(E);
  return EmitLValue(E).getAddress();
}

Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
  return EmitLValue(E).getAddress();
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              const APValue &Init) {
  assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

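// An informal note on protectFromPeepholes/unprotectFromPeepholes: the one
// aggressive IR-gen peephole guarded here is trunc(zext) folding. If a
// scalar rvalue that happens to be a zext is kept live across a
// conditionally-emitted region, a later trunc could otherwise fold straight
// through it; the no-op bitcast created above hides the zext until
// unprotectFromPeepholes() erases the bitcast again.
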
void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}

void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
                                              QualType Ty, SourceLocation Loc,
                                              SourceLocation AssumptionLoc,
                                              llvm::Value *Alignment,
                                              llvm::Value *OffsetValue) {
  llvm::Value *TheCheck;
  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
  if (SanOpts.has(SanitizerKind::Alignment)) {
    EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
                                 OffsetValue, TheCheck, Assumption);
  }
}

void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
                                              QualType Ty, SourceLocation Loc,
                                              SourceLocation AssumptionLoc,
                                              unsigned Alignment,
                                              llvm::Value *OffsetValue) {
  llvm::Value *TheCheck;
  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
  if (SanOpts.has(SanitizerKind::Alignment)) {
    llvm::Value *AlignmentVal = llvm::ConstantInt::get(IntPtrTy, Alignment);
    EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, AlignmentVal,
                                 OffsetValue, TheCheck, Assumption);
  }
}

void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
                                              const Expr *E,
                                              SourceLocation AssumptionLoc,
                                              unsigned Alignment,
                                              llvm::Value *OffsetValue) {
  if (auto *CE = dyn_cast<CastExpr>(E))
    E = CE->getSubExprAsWritten();
  QualType Ty = E->getType();
  SourceLocation Loc = E->getExprLoc();

  EmitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
                          OffsetValue);
}

llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       I->getAnnotation(), D->getLocation());
}

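// Illustrative source for the annotation helpers (hypothetical names):
//
//   __attribute__((annotate("my_tag"))) int x;
//
// lowers to a call of llvm.var.annotation on x's address; EmitAnnotationCall
// above packs the four arguments: the annotated value, the annotation
// string, the source file name, and the line number.
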
Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                              Address Addr) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Value *V = Addr.getPointer();
  llvm::Type *VTy = V->getType();
  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                       CGM.Int8PtrTy);

  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return Address(V, Addr.getAlignment());
}

CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }

CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
    : CGF(CGF) {
  assert(!CGF->IsSanitizerScope);
  CGF->IsSanitizerScope = true;
}

CodeGenFunction::SanitizerScope::~SanitizerScope() {
  CGF->IsSanitizerScope = false;
}

void CodeGenFunction::InsertHelper(llvm::Instruction *I,
                                   const llvm::Twine &Name,
                                   llvm::BasicBlock *BB,
                                   llvm::BasicBlock::iterator InsertPt) const {
  LoopStack.InsertHelper(I);
  if (IsSanitizerScope)
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
}

void CGBuilderInserter::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const {
  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
  if (CGF)
    CGF->InsertHelper(I, Name, BB, InsertPt);
}

static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
                                CodeGenModule &CGM, const FunctionDecl *FD,
                                std::string &FirstMissing) {
  // If there aren't any required features listed then go ahead and return.
  if (ReqFeatures.empty())
    return false;

  // Now build up the set of caller features and verify that all the required
  // features are there.
  llvm::StringMap<bool> CallerFeatureMap;
  CGM.getFunctionFeatureMap(CallerFeatureMap, GlobalDecl().getWithDecl(FD));

  // Every required feature must be present in the caller's feature map; an
  // entry may list '|'-separated alternatives, any one of which suffices.
  return std::all_of(
      ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
        SmallVector<StringRef, 1> OrFeatures;
        Feature.split(OrFeatures, '|');
        return llvm::any_of(OrFeatures, [&](StringRef Feature) {
          if (!CallerFeatureMap.lookup(Feature)) {
            FirstMissing = Feature.str();
            return false;
          }
          return true;
        });
      });
}

// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
                                          const FunctionDecl *TargetDecl) {
  return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
}

// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
                                          const FunctionDecl *TargetDecl) {
  // Early exit if this is an indirect call.
  if (!TargetDecl)
    return;

  // Get the current enclosing function if it exists. If it doesn't
  // we can't check the target features anyhow.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl);
  if (!FD)
    return;

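  // A hypothetical illustration of the diagnostics built below: calling a
  // builtin such as __builtin_ia32_crc32qi (which requires "sse4.2") from a
  // caller compiled without that feature reports err_builtin_needs_feature,
  // while calling an always_inline callee declared
  // __attribute__((target("avx2"))) from a plain caller reports
  // err_function_needs_feature with the first missing feature.
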
  // Grab the required features for the call. For a builtin this is listed in
  // the td file with the default cpu, for an always_inline function this is
  // any listed cpu and any listed features.
  unsigned BuiltinID = TargetDecl->getBuiltinID();
  std::string MissingFeature;
  if (BuiltinID) {
    SmallVector<StringRef, 1> ReqFeatures;
    const char *FeatureList =
        CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
    // Return if the builtin doesn't have any required features.
    if (!FeatureList || StringRef(FeatureList) == "")
      return;
    StringRef(FeatureList).split(ReqFeatures, ',');
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
          << TargetDecl->getDeclName()
          << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);

  } else if (TargetDecl->hasAttr<TargetAttr>() ||
             TargetDecl->hasAttr<CPUSpecificAttr>()) {
    // Get the required features for the callee.

    const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
    TargetAttr::ParsedTargetAttr ParsedAttr = CGM.filterFunctionTargetAttrs(TD);

    SmallVector<StringRef, 1> ReqFeatures;
    llvm::StringMap<bool> CalleeFeatureMap;
    CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);

    for (const auto &F : ParsedAttr.Features) {
      if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
        ReqFeatures.push_back(StringRef(F).substr(1));
    }

    for (const auto &F : CalleeFeatureMap) {
      // Only positive features are "required".
      if (F.getValue())
        ReqFeatures.push_back(F.getKey());
    }
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
          << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
  }
}

void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
  if (!CGM.getCodeGenOpts().SanitizeStats)
    return;

  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
  CGM.getSanStats().create(IRB, SSK);
}

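// A sketch of the multiversion-resolver machinery below, for hypothetical
// declarations
//
//   __attribute__((target("avx2")))    int foo(void);
//   __attribute__((target("default"))) int foo(void);
//
// The emitted resolver tests each option in order via cpu_is/cpu_supports
// style checks (FormResolverCondition) and, per option, either returns the
// chosen function pointer (ifunc targets) or musttail-calls the chosen body
// (CreateMultiVersionResolverReturn).
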
llvm::Value *
CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
  llvm::Value *Condition = nullptr;

  if (!RO.Conditions.Architecture.empty())
    Condition = EmitX86CpuIs(RO.Conditions.Architecture);

  if (!RO.Conditions.Features.empty()) {
    llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
    Condition =
        Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
  }
  return Condition;
}

static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
                                             llvm::Function *Resolver,
                                             CGBuilderTy &Builder,
                                             llvm::Function *FuncToReturn,
                                             bool SupportsIFunc) {
  if (SupportsIFunc) {
    Builder.CreateRet(FuncToReturn);
    return;
  }

  llvm::SmallVector<llvm::Value *, 10> Args;
  llvm::for_each(Resolver->args(),
                 [&](llvm::Argument &Arg) { Args.push_back(&Arg); });

  llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
  Result->setTailCallKind(llvm::CallInst::TCK_MustTail);

  if (Resolver->getReturnType()->isVoidTy())
    Builder.CreateRetVoid();
  else
    Builder.CreateRet(Result);
}

void CodeGenFunction::EmitMultiVersionResolver(
    llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
  assert((getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86 ||
          getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86_64) &&
         "Only implemented for x86 targets");

  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();

  // Main function's basic block.
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
  Builder.SetInsertPoint(CurBlock);
  EmitX86CpuInit();

  for (const MultiVersionResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);
    llvm::Value *Condition = FormResolverCondition(RO);

    // The 'default' or 'generic' case.
    if (!Condition) {
      assert(&RO == Options.end() - 1 &&
             "Default or Generic case must be last");
      CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
                                       SupportsIFunc);
      return;
    }

    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
    CGBuilderTy RetBuilder(*this, RetBlock);
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
                                     SupportsIFunc);
    CurBlock = createBasicBlock("resolver_else", Resolver);
    Builder.CreateCondBr(Condition, RetBlock, CurBlock);
  }

  // If no generic/default, emit an unreachable.
  Builder.SetInsertPoint(CurBlock);
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();
  Builder.ClearInsertionPoint();
}

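// Informal sketch of the no-default fallthrough just above: when no
// 'default'/'generic' option ends the list, the final resolver block is
// roughly
//
//   resolver_else:
//     call void @llvm.trap()
//     unreachable
//
// so a CPU matching none of the options traps rather than calling an
// arbitrary version.
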
// Loc - where the diagnostic will point, where in the source code this
//  alignment has failed.
// SecondaryLoc - if present (will be present if sufficiently different from
//  Loc), the diagnostic will additionally point a "Note:" to this location.
//  It should be the location where the __attribute__((assume_aligned))
//  was written.
void CodeGenFunction::EmitAlignmentAssumptionCheck(
    llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
    SourceLocation SecondaryLoc, llvm::Value *Alignment,
    llvm::Value *OffsetValue, llvm::Value *TheCheck,
    llvm::Instruction *Assumption) {
  assert(Assumption && isa<llvm::CallInst>(Assumption) &&
         cast<llvm::CallInst>(Assumption)->getCalledValue() ==
             llvm::Intrinsic::getDeclaration(
                 Builder.GetInsertBlock()->getParent()->getParent(),
                 llvm::Intrinsic::assume) &&
         "Assumption should be a call to llvm.assume().");
  assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
         "Assumption should be the last instruction of the basic block, "
         "since the basic block is still being generated.");

  if (!SanOpts.has(SanitizerKind::Alignment))
    return;

  // Don't check pointers to volatile data. The behavior here is
  // implementation-defined.
  if (Ty->getPointeeType().isVolatileQualified())
    return;

  // We need to temporarily remove the assumption so we can insert the
  // sanitizer check before it, else the check will be dropped by
  // optimizations.
  Assumption->removeFromParent();

  {
    SanitizerScope SanScope(this);

    if (!OffsetValue)
      OffsetValue = Builder.getInt1(0); // no offset.

    llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
                                    EmitCheckSourceLocation(SecondaryLoc),
                                    EmitCheckTypeDescriptor(Ty)};
    llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
                                  EmitCheckValue(Alignment),
                                  EmitCheckValue(OffsetValue)};
    EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
              SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
  }

  // We are now in the (new, empty) "cont" basic block.
  // Reintroduce the assumption.
  Builder.Insert(Assumption);
  // FIXME: Assumption still has its original basic block as its parent.
}

llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
  if (CGDebugInfo *DI = getDebugInfo())
    return DI->SourceLocToDebugLoc(Location);

  return llvm::DebugLoc();
}

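// End-to-end illustration (hypothetical source, abridged behavior) of the
// alignment-assumption machinery above: for
//
//   void *p = __builtin_assume_aligned(q, 64);
//
// EmitAlignmentAssumption emits a call to llvm.assume over an alignment
// predicate on 'q', and with -fsanitize=alignment additionally routes the
// same predicate through EmitAlignmentAssumptionCheck, which reports via
// SanitizerHandler::AlignmentAssumption when 'q' is not 64-byte aligned.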