1 //===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This coordinates the per-function state used while generating code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CodeGenFunction.h"
14 #include "CGBlocks.h"
15 #include "CGCleanup.h"
16 #include "CGCUDARuntime.h"
17 #include "CGCXXABI.h"
18 #include "CGDebugInfo.h"
19 #include "CGOpenMPRuntime.h"
20 #include "CodeGenModule.h"
21 #include "CodeGenPGO.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/ASTContext.h"
24 #include "clang/AST/ASTLambda.h"
25 #include "clang/AST/Decl.h"
26 #include "clang/AST/DeclCXX.h"
27 #include "clang/AST/StmtCXX.h"
28 #include "clang/AST/StmtObjC.h"
29 #include "clang/Basic/Builtins.h"
30 #include "clang/Basic/CodeGenOptions.h"
31 #include "clang/Basic/TargetInfo.h"
32 #include "clang/CodeGen/CGFunctionInfo.h"
33 #include "clang/Frontend/FrontendDiagnostic.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/Dominators.h"
36 #include "llvm/IR/Intrinsics.h"
37 #include "llvm/IR/MDBuilder.h"
38 #include "llvm/IR/Operator.h"
39 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
40 using namespace clang;
41 using namespace CodeGen;
42 
/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
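/// For example, a plain -O0 build gets no markers, while -O0 combined with
/// -fsanitize=address -fsanitize-address-use-after-scope still emits them.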
45 static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
46                                       const LangOptions &LangOpts) {
47   if (CGOpts.DisableLifetimeMarkers)
48     return false;
49 
50   // Sanitizers may use markers.
51   if (CGOpts.SanitizeAddressUseAfterScope ||
52       LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
53       LangOpts.Sanitize.has(SanitizerKind::Memory))
54     return true;
55 
56   // For now, only in optimized builds.
57   return CGOpts.OptimizationLevel != 0;
58 }
59 
60 CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
61     : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
62       Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
63               CGBuilderInserterTy(this)),
64       SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
65       PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
66                     CGM.getCodeGenOpts(), CGM.getLangOpts())) {
67   if (!suppressNewContext)
68     CGM.getCXXABI().getMangleContext().startNewFunction();
69 
70   llvm::FastMathFlags FMF;
71   if (CGM.getLangOpts().FastMath)
72     FMF.setFast();
73   if (CGM.getLangOpts().FiniteMathOnly) {
74     FMF.setNoNaNs();
75     FMF.setNoInfs();
76   }
  if (CGM.getCodeGenOpts().NoNaNsFPMath)
    FMF.setNoNaNs();
  if (CGM.getCodeGenOpts().NoSignedZeros)
    FMF.setNoSignedZeros();
  if (CGM.getCodeGenOpts().ReciprocalMath)
    FMF.setAllowReciprocal();
  if (CGM.getCodeGenOpts().Reassociate)
    FMF.setAllowReassoc();
89   Builder.setFastMathFlags(FMF);
90 }
91 
92 CodeGenFunction::~CodeGenFunction() {
93   assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
94 
95   // If there are any unclaimed block infos, go ahead and destroy them
96   // now.  This can happen if IR-gen gets clever and skips evaluating
97   // something.
98   if (FirstBlockInfo)
99     destroyBlockInfos(FirstBlockInfo);
100 
101   if (getLangOpts().OpenMP && CurFn)
102     CGM.getOpenMPRuntime().functionFinished(*this);
103 }
104 
105 CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
106                                                     LValueBaseInfo *BaseInfo,
107                                                     TBAAAccessInfo *TBAAInfo) {
108   return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
109                                  /* forPointeeType= */ true);
110 }
111 
112 CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
113                                                    LValueBaseInfo *BaseInfo,
114                                                    TBAAAccessInfo *TBAAInfo,
115                                                    bool forPointeeType) {
116   if (TBAAInfo)
117     *TBAAInfo = CGM.getTBAAAccessInfo(T);
118 
119   // Honor alignment typedef attributes even on incomplete types.
120   // We also honor them straight for C++ class types, even as pointees;
121   // there's an expressivity gap here.
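  // For example, 'typedef int I16 __attribute__((aligned(16)));' yields a
  // 16-byte alignment here, even when I16 appears only as a pointee type.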
122   if (auto TT = T->getAs<TypedefType>()) {
123     if (auto Align = TT->getDecl()->getMaxAlignment()) {
124       if (BaseInfo)
125         *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
126       return getContext().toCharUnitsFromBits(Align);
127     }
128   }
129 
130   if (BaseInfo)
131     *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
132 
133   CharUnits Alignment;
134   if (T->isIncompleteType()) {
135     Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
136   } else {
137     // For C++ class pointees, we don't know whether we're pointing at a
138     // base or a complete object, so we generally need to use the
139     // non-virtual alignment.
140     const CXXRecordDecl *RD;
141     if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
142       Alignment = CGM.getClassPointerAlignment(RD);
143     } else {
144       Alignment = getContext().getTypeAlignInChars(T);
145       if (T.getQualifiers().hasUnaligned())
146         Alignment = CharUnits::One();
147     }
148 
149     // Cap to the global maximum type alignment unless the alignment
150     // was somehow explicit on the type.
151     if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
152       if (Alignment.getQuantity() > MaxAlign &&
153           !getContext().isAlignmentRequired(T))
154         Alignment = CharUnits::fromQuantity(MaxAlign);
155     }
156   }
157   return Alignment;
158 }
159 
160 LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
161   LValueBaseInfo BaseInfo;
162   TBAAAccessInfo TBAAInfo;
163   CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
164   return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
165                           TBAAInfo);
166 }
167 
/// Given a value of type T* that may not point to a complete object,
/// construct an l-value with the natural pointee alignment of T.
170 LValue
171 CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
172   LValueBaseInfo BaseInfo;
173   TBAAAccessInfo TBAAInfo;
174   CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
175                                             /* forPointeeType= */ true);
176   return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
177 }
178 
179 
180 llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
181   return CGM.getTypes().ConvertTypeForMem(T);
182 }
183 
184 llvm::Type *CodeGenFunction::ConvertType(QualType T) {
185   return CGM.getTypes().ConvertType(T);
186 }
187 
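/// Classify how IR-generation evaluates values of the given type: for
/// example, 'int' and 'int *' are TEK_Scalar, '_Complex float' is
/// TEK_Complex, and 'struct S' or 'int[4]' are TEK_Aggregate; an
/// _Atomic(T) is classified by its underlying T.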
188 TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
189   type = type.getCanonicalType();
190   while (true) {
191     switch (type->getTypeClass()) {
192 #define TYPE(name, parent)
193 #define ABSTRACT_TYPE(name, parent)
194 #define NON_CANONICAL_TYPE(name, parent) case Type::name:
195 #define DEPENDENT_TYPE(name, parent) case Type::name:
196 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
197 #include "clang/AST/TypeNodes.inc"
198       llvm_unreachable("non-canonical or dependent type in IR-generation");
199 
200     case Type::Auto:
201     case Type::DeducedTemplateSpecialization:
202       llvm_unreachable("undeduced type in IR-generation");
203 
204     // Various scalar types.
205     case Type::Builtin:
206     case Type::Pointer:
207     case Type::BlockPointer:
208     case Type::LValueReference:
209     case Type::RValueReference:
210     case Type::MemberPointer:
211     case Type::Vector:
212     case Type::ExtVector:
213     case Type::FunctionProto:
214     case Type::FunctionNoProto:
215     case Type::Enum:
216     case Type::ObjCObjectPointer:
217     case Type::Pipe:
218       return TEK_Scalar;
219 
220     // Complexes.
221     case Type::Complex:
222       return TEK_Complex;
223 
224     // Arrays, records, and Objective-C objects.
225     case Type::ConstantArray:
226     case Type::IncompleteArray:
227     case Type::VariableArray:
228     case Type::Record:
229     case Type::ObjCObject:
230     case Type::ObjCInterface:
231       return TEK_Aggregate;
232 
233     // We operate on atomic values according to their underlying type.
234     case Type::Atomic:
235       type = cast<AtomicType>(type)->getValueType();
236       continue;
237     }
238     llvm_unreachable("unknown type kind!");
239   }
240 }
241 
242 llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
243   // For cleanliness, we try to avoid emitting the return block for
244   // simple cases.
245   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
246 
247   if (CurBB) {
248     assert(!CurBB->getTerminator() && "Unexpected terminated block.");
249 
250     // We have a valid insert point, reuse it if it is empty or there are no
251     // explicit jumps to the return block.
252     if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
253       ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
254       delete ReturnBlock.getBlock();
255       ReturnBlock = JumpDest();
256     } else
257       EmitBlock(ReturnBlock.getBlock());
258     return llvm::DebugLoc();
259   }
260 
261   // Otherwise, if the return block is the target of a single direct
262   // branch then we can just put the code in that block instead. This
263   // cleans up functions which started with a unified return block.
264   if (ReturnBlock.getBlock()->hasOneUse()) {
265     llvm::BranchInst *BI =
266       dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
267     if (BI && BI->isUnconditional() &&
268         BI->getSuccessor(0) == ReturnBlock.getBlock()) {
269       // Record/return the DebugLoc of the simple 'return' expression to be used
270       // later by the actual 'ret' instruction.
271       llvm::DebugLoc Loc = BI->getDebugLoc();
272       Builder.SetInsertPoint(BI->getParent());
273       BI->eraseFromParent();
274       delete ReturnBlock.getBlock();
275       ReturnBlock = JumpDest();
276       return Loc;
277     }
278   }
279 
280   // FIXME: We are at an unreachable point, there is no reason to emit the block
281   // unless it has uses. However, we still need a place to put the debug
282   // region.end for now.
283 
284   EmitBlock(ReturnBlock.getBlock());
285   return llvm::DebugLoc();
286 }
287 
288 static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
289   if (!BB) return;
290   if (!BB->use_empty())
291     return CGF.CurFn->getBasicBlockList().push_back(BB);
292   delete BB;
293 }
294 
295 void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
296   assert(BreakContinueStack.empty() &&
297          "mismatched push/pop in break/continue stack!");
298 
  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0 &&
                               NumSimpleReturnExprs == NumReturnExprs &&
                               ReturnBlock.getBlock()->use_empty();
302   // Usually the return expression is evaluated before the cleanup
303   // code.  If the function contains only a simple return statement,
304   // such as a constant, the location before the cleanup code becomes
305   // the last useful breakpoint in the function, because the simple
306   // return expression will be evaluated after the cleanup code. To be
307   // safe, set the debug location for cleanup code to the location of
308   // the return statement.  Otherwise the cleanup code should be at the
309   // end of the function's lexical scope.
310   //
311   // If there are multiple branches to the return block, the branch
312   // instructions will get the location of the return statements and
313   // all will be fine.
314   if (CGDebugInfo *DI = getDebugInfo()) {
315     if (OnlySimpleReturnStmts)
316       DI->EmitLocation(Builder, LastStopPoint);
317     else
318       DI->EmitLocation(Builder, EndLoc);
319   }
320 
321   // Pop any cleanups that might have been associated with the
322   // parameters.  Do this in whatever block we're currently in; it's
323   // important to do this before we enter the return block or return
324   // edges will be *really* confused.
325   bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
326   bool HasOnlyLifetimeMarkers =
327       HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
328   bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
329   if (HasCleanups) {
330     // Make sure the line table doesn't jump back into the body for
331     // the ret after it's been at EndLoc.
332     if (CGDebugInfo *DI = getDebugInfo())
333       if (OnlySimpleReturnStmts)
334         DI->EmitLocation(Builder, EndLoc);
335 
336     PopCleanupBlocks(PrologueCleanupDepth);
337   }
338 
339   // Emit function epilog (to return).
340   llvm::DebugLoc Loc = EmitReturnBlock();
341 
342   if (ShouldInstrumentFunction()) {
343     if (CGM.getCodeGenOpts().InstrumentFunctions)
344       CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
345     if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
346       CurFn->addFnAttr("instrument-function-exit-inlined",
347                        "__cyg_profile_func_exit");
348   }
349 
350   // Emit debug descriptor for function end.
351   if (CGDebugInfo *DI = getDebugInfo())
352     DI->EmitFunctionEnd(Builder, CurFn);
353 
  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
356   ApplyDebugLocation AL(*this, Loc);
357   EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
358   EmitEndEHSpec(CurCodeDecl);
359 
360   assert(EHStack.empty() &&
361          "did not remove all scopes from cleanup stack!");
362 
363   // If someone did an indirect goto, emit the indirect goto block at the end of
364   // the function.
365   if (IndirectBranch) {
366     EmitBlock(IndirectBranch->getParent());
367     Builder.ClearInsertionPoint();
368   }
369 
370   // If some of our locals escaped, insert a call to llvm.localescape in the
371   // entry block.
372   if (!EscapedLocals.empty()) {
373     // Invert the map from local to index into a simple vector. There should be
374     // no holes.
375     SmallVector<llvm::Value *, 4> EscapeArgs;
376     EscapeArgs.resize(EscapedLocals.size());
377     for (auto &Pair : EscapedLocals)
378       EscapeArgs[Pair.second] = Pair.first;
379     llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
380         &CGM.getModule(), llvm::Intrinsic::localescape);
381     CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
382   }
383 
384   // Remove the AllocaInsertPt instruction, which is just a convenience for us.
385   llvm::Instruction *Ptr = AllocaInsertPt;
386   AllocaInsertPt = nullptr;
387   Ptr->eraseFromParent();
388 
  // If someone took the address of a label but never did an indirect goto, we
  // made a zero-entry PHI node, which is illegal; zap it now.
391   if (IndirectBranch) {
392     llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
393     if (PN->getNumIncomingValues() == 0) {
394       PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
395       PN->eraseFromParent();
396     }
397   }
398 
399   EmitIfUsed(*this, EHResumeBlock);
400   EmitIfUsed(*this, TerminateLandingPad);
401   EmitIfUsed(*this, TerminateHandler);
402   EmitIfUsed(*this, UnreachableBlock);
403 
404   for (const auto &FuncletAndParent : TerminateFunclets)
405     EmitIfUsed(*this, FuncletAndParent.second);
406 
407   if (CGM.getCodeGenOpts().EmitDeclMetadata)
408     EmitDeclMetadata();
409 
  for (const auto &R : DeferredReplacements) {
    R.first->replaceAllUsesWith(R.second);
    R.first->eraseFromParent();
  }
417 
418   // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
419   // PHIs if the current function is a coroutine. We don't do it for all
420   // functions as it may result in slight increase in numbers of instructions
421   // if compiled with no optimizations. We do it for coroutine as the lifetime
422   // of CleanupDestSlot alloca make correct coroutine frame building very
423   // difficult.
424   if (NormalCleanupDest.isValid() && isCoroutine()) {
425     llvm::DominatorTree DT(*CurFn);
426     llvm::PromoteMemToReg(
427         cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
428     NormalCleanupDest = Address::invalid();
429   }
430 
431   // Scan function arguments for vector width.
432   for (llvm::Argument &A : CurFn->args())
433     if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
434       LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
435                                    VT->getPrimitiveSizeInBits().getFixedSize());
436 
437   // Update vector width based on return type.
438   if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
439     LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
440                                   VT->getPrimitiveSizeInBits().getFixedSize());
441 
442   // Add the required-vector-width attribute. This contains the max width from:
443   // 1. min-vector-width attribute used in the source program.
444   // 2. Any builtins used that have a vector width specified.
445   // 3. Values passed in and out of inline assembly.
446   // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by this
448   //    function.
449   CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
450 
451   // If we generated an unreachable return block, delete it now.
452   if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
453     Builder.ClearInsertionPoint();
454     ReturnBlock.getBlock()->eraseFromParent();
455   }
456   if (ReturnValue.isValid()) {
457     auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
458     if (RetAlloca && RetAlloca->use_empty()) {
459       RetAlloca->eraseFromParent();
460       ReturnValue = Address::invalid();
461     }
462   }
463 }
464 
/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
467 bool CodeGenFunction::ShouldInstrumentFunction() {
468   if (!CGM.getCodeGenOpts().InstrumentFunctions &&
469       !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
470       !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
471     return false;
472   if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
473     return false;
474   return true;
475 }
476 
/// ShouldXRayInstrumentFunction - Return true if the current function should
/// be instrumented with XRay nop sleds.
479 bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
480   return CGM.getCodeGenOpts().XRayInstrumentFunctions;
481 }
482 
/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin, when doing XRay instrumentation.
485 bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
486   return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
487          (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
488           CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
489               XRayInstrKind::Custom);
490 }
491 
492 bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
493   return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
494          (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
495           CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
496               XRayInstrKind::Typed);
497 }
498 
499 llvm::Constant *
500 CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
501                                             llvm::Constant *Addr) {
502   // Addresses stored in prologue data can't require run-time fixups and must
503   // be PC-relative. Run-time fixups are undesirable because they necessitate
504   // writable text segments, which are unsafe. And absolute addresses are
505   // undesirable because they break PIE mode.
506 
507   // Add a layer of indirection through a private global. Taking its address
508   // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
509   auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
510                                       /*isConstant=*/true,
511                                       llvm::GlobalValue::PrivateLinkage, Addr);
512 
513   // Create a PC-relative address.
514   auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
515   auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
516   auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
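  // In effect: Encoded = i32(ptrtoint(GV) - ptrtoint(F)), which
  // DecodeAddrUsedInPrologue below inverts by re-adding the function address
  // and loading through the recovered global.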
517   return (IntPtrTy == Int32Ty)
518              ? PCRelAsInt
519              : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
520 }
521 
522 llvm::Value *
523 CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
524                                           llvm::Value *EncodedAddr) {
525   // Reconstruct the address of the global.
526   auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
527   auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
528   auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
529   auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");
530 
531   // Load the original pointer through the global.
532   return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
533                             "decoded_addr");
534 }
535 
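/// Attach OpenCL kernel metadata to \p Fn. As a sketch of the effect, a
/// kernel declared as
///   __kernel __attribute__((reqd_work_group_size(64, 1, 1))) void k();
/// receives a '!reqd_work_group_size' node holding {i32 64, i32 1, i32 1}.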
void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn) {
539   if (!FD->hasAttr<OpenCLKernelAttr>())
540     return;
541 
542   llvm::LLVMContext &Context = getLLVMContext();
543 
544   CGM.GenOpenCLArgMetadata(Fn, FD, this);
545 
546   if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
547     QualType HintQTy = A->getTypeHint();
548     const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
549     bool IsSignedInteger =
550         HintQTy->isSignedIntegerType() ||
551         (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
552     llvm::Metadata *AttrMDArgs[] = {
553         llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
554             CGM.getTypes().ConvertType(A->getTypeHint()))),
555         llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
556             llvm::IntegerType::get(Context, 32),
557             llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
558     Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
559   }
560 
561   if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
562     llvm::Metadata *AttrMDArgs[] = {
563         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
564         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
565         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
566     Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
567   }
568 
569   if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
570     llvm::Metadata *AttrMDArgs[] = {
571         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
572         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
573         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
574     Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
575   }
576 
577   if (const OpenCLIntelReqdSubGroupSizeAttr *A =
578           FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
579     llvm::Metadata *AttrMDArgs[] = {
580         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
581     Fn->setMetadata("intel_reqd_sub_group_size",
582                     llvm::MDNode::get(Context, AttrMDArgs));
583   }
584 }
585 
586 /// Determine whether the function F ends with a return stmt.
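/// Only the last statement of a compound body is inspected, so a return
/// nested inside a trailing if-statement does not count.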
587 static bool endsWithReturn(const Decl* F) {
588   const Stmt *Body = nullptr;
589   if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
590     Body = FD->getBody();
591   else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
592     Body = OMD->getBody();
593 
594   if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
595     auto LastStmt = CS->body_rbegin();
596     if (LastStmt != CS->body_rend())
597       return isa<ReturnStmt>(*LastStmt);
598   }
599   return false;
600 }
601 
602 void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
603   if (SanOpts.has(SanitizerKind::Thread)) {
604     Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
605     Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
606   }
607 }
608 
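/// Return true if \p D looks like an STL allocate() member function, i.e.
/// one shaped like:
///   T *allocate(size_t n);
///   T *allocate(size_t n, const void *hint);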
609 static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
610   auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
611   if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
612       !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
613       (MD->getNumParams() != 1 && MD->getNumParams() != 2))
614     return false;
615 
616   if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
617     return false;
618 
619   if (MD->getNumParams() == 2) {
620     auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
621     if (!PT || !PT->isVoidPointerType() ||
622         !PT->getPointeeType().isConstQualified())
623       return false;
624   }
625 
626   return true;
627 }
628 
629 /// Return the UBSan prologue signature for \p FD if one is available.
630 static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
631                                             const FunctionDecl *FD) {
632   if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
633     if (!MD->isStatic())
634       return nullptr;
635   return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
636 }
637 
638 void CodeGenFunction::StartFunction(GlobalDecl GD,
639                                     QualType RetTy,
640                                     llvm::Function *Fn,
641                                     const CGFunctionInfo &FnInfo,
642                                     const FunctionArgList &Args,
643                                     SourceLocation Loc,
644                                     SourceLocation StartLoc) {
645   assert(!CurFn &&
646          "Do not use a CodeGenFunction object for more than one function");
647 
648   const Decl *D = GD.getDecl();
649 
650   DidCallStackSave = false;
651   CurCodeDecl = D;
652   if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
653     if (FD->usesSEHTry())
654       CurSEHParent = FD;
655   CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
656   FnRetTy = RetTy;
657   CurFn = Fn;
658   CurFnInfo = &FnInfo;
659   assert(CurFn->isDeclaration() && "Function already has body?");
660 
661   // If this function has been blacklisted for any of the enabled sanitizers,
662   // disable the sanitizer for the function.
663   do {
664 #define SANITIZER(NAME, ID)                                                    \
665   if (SanOpts.empty())                                                         \
666     break;                                                                     \
667   if (SanOpts.has(SanitizerKind::ID))                                          \
668     if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc))                \
669       SanOpts.set(SanitizerKind::ID, false);
670 
671 #include "clang/Basic/Sanitizers.def"
672 #undef SANITIZER
673   } while (0);
674 
675   if (D) {
676     // Apply the no_sanitize* attributes to SanOpts.
677     for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
678       SanitizerMask mask = Attr->getMask();
679       SanOpts.Mask &= ~mask;
680       if (mask & SanitizerKind::Address)
681         SanOpts.set(SanitizerKind::KernelAddress, false);
682       if (mask & SanitizerKind::KernelAddress)
683         SanOpts.set(SanitizerKind::Address, false);
684       if (mask & SanitizerKind::HWAddress)
685         SanOpts.set(SanitizerKind::KernelHWAddress, false);
686       if (mask & SanitizerKind::KernelHWAddress)
687         SanOpts.set(SanitizerKind::HWAddress, false);
688     }
689   }
690 
691   // Apply sanitizer attributes to the function.
692   if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
693     Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
694   if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
695     Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
696   if (SanOpts.has(SanitizerKind::MemTag))
697     Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
698   if (SanOpts.has(SanitizerKind::Thread))
699     Fn->addFnAttr(llvm::Attribute::SanitizeThread);
700   if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
701     Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
702   if (SanOpts.has(SanitizerKind::SafeStack))
703     Fn->addFnAttr(llvm::Attribute::SafeStack);
704   if (SanOpts.has(SanitizerKind::ShadowCallStack))
705     Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
706 
707   // Apply fuzzing attribute to the function.
708   if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
709     Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
710 
  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run
  // time.
713   if (SanOpts.has(SanitizerKind::Thread)) {
714     if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
715       IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
716       if (OMD->getMethodFamily() == OMF_dealloc ||
717           OMD->getMethodFamily() == OMF_initialize ||
718           (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
719         markAsIgnoreThreadCheckingAtRuntime(Fn);
720       }
721     }
722   }
723 
  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on
  // the namespace because not all allocators are in namespace std.
727   if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
728     if (matchesStlAllocatorFn(D, getContext()))
729       SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
730   }
731 
  // Ignore null checks in coroutine functions since the coroutine passes
  // are not aware of how to move the extra UBSan instructions across the split
  // coroutine boundaries.
735   if (D && SanOpts.has(SanitizerKind::Null))
736     if (const auto *FD = dyn_cast<FunctionDecl>(D))
737       if (FD->getBody() &&
738           FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
739         SanOpts.Mask &= ~SanitizerKind::Null;
740 
741   // Apply xray attributes to the function (as a string, for now)
742   if (D) {
743     if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
744       if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
745               XRayInstrKind::Function)) {
746         if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
747           Fn->addFnAttr("function-instrument", "xray-always");
748         if (XRayAttr->neverXRayInstrument())
749           Fn->addFnAttr("function-instrument", "xray-never");
750         if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
751           if (ShouldXRayInstrumentFunction())
752             Fn->addFnAttr("xray-log-args",
753                           llvm::utostr(LogArgs->getArgumentCount()));
754       }
755     } else {
756       if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
757         Fn->addFnAttr(
758             "xray-instruction-threshold",
759             llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
760     }
761   }
762 
763   // Add no-jump-tables value.
764   Fn->addFnAttr("no-jump-tables",
765                 llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));
766 
767   // Add no-inline-line-tables value.
768   if (CGM.getCodeGenOpts().NoInlineLineTables)
769     Fn->addFnAttr("no-inline-line-tables");
770 
771   // Add profile-sample-accurate value.
772   if (CGM.getCodeGenOpts().ProfileSampleAccurate)
773     Fn->addFnAttr("profile-sample-accurate");
774 
775   if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
776     Fn->addFnAttr("cfi-canonical-jump-table");
777 
778   if (getLangOpts().OpenCL) {
779     // Add metadata for a kernel function.
780     if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
781       EmitOpenCLKernelMetadata(FD, Fn);
782   }
783 
784   // If we are checking function types, emit a function type signature as
785   // prologue data.
786   if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
787     if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
788       if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
789         // Remove any (C++17) exception specifications, to allow calling e.g. a
790         // noexcept function through a non-noexcept pointer.
791         auto ProtoTy =
792           getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
793                                                         EST_None);
794         llvm::Constant *FTRTTIConst =
795             CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
796         llvm::Constant *FTRTTIConstEncoded =
797             EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
798         llvm::Constant *PrologueStructElems[] = {PrologueSig,
799                                                  FTRTTIConstEncoded};
800         llvm::Constant *PrologueStructConst =
801             llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
802         Fn->setPrologueData(PrologueStructConst);
803       }
804     }
805   }
806 
807   // If we're checking nullability, we need to know whether we can check the
808   // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
809   if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
810     auto Nullability = FnRetTy->getNullability(getContext());
811     if (Nullability && *Nullability == NullabilityKind::NonNull) {
812       if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
813             CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
814         RetValNullabilityPrecondition =
815             llvm::ConstantInt::getTrue(getLLVMContext());
816     }
817   }
818 
819   // If we're in C++ mode and the function name is "main", it is guaranteed
820   // to be norecurse by the standard (3.6.1.3 "The function main shall not be
821   // used within a program").
822   if (getLangOpts().CPlusPlus)
823     if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
824       if (FD->isMain())
825         Fn->addFnAttr(llvm::Attribute::NoRecurse);
826 
827   // If a custom alignment is used, force realigning to this alignment on
828   // any main function which certainly will need it.
829   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
830     if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
831         CGM.getCodeGenOpts().StackAlignment)
832       Fn->addFnAttr("stackrealign");
833 
834   llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
835 
  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
839   llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
840   AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
841 
842   ReturnBlock = getJumpDestInCurrentScope("return");
843 
844   Builder.SetInsertPoint(EntryBB);
845 
846   // If we're checking the return value, allocate space for a pointer to a
847   // precise source location of the checked return statement.
848   if (requiresReturnValueCheck()) {
849     ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
850     InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
851   }
852 
853   // Emit subprogram debug descriptor.
854   if (CGDebugInfo *DI = getDebugInfo()) {
855     // Reconstruct the type from the argument list so that implicit parameters,
856     // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
857     // convention.
858     CallingConv CC = CallingConv::CC_C;
859     if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
860       if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
861         CC = SrcFnTy->getCallConv();
862     SmallVector<QualType, 16> ArgTypes;
863     for (const VarDecl *VD : Args)
864       ArgTypes.push_back(VD->getType());
865     QualType FnType = getContext().getFunctionType(
866         RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
867     DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
868                           Builder);
869   }
870 
871   if (ShouldInstrumentFunction()) {
872     if (CGM.getCodeGenOpts().InstrumentFunctions)
873       CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
874     if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
875       CurFn->addFnAttr("instrument-function-entry-inlined",
876                        "__cyg_profile_func_enter");
877     if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
878       CurFn->addFnAttr("instrument-function-entry-inlined",
879                        "__cyg_profile_func_enter_bare");
880   }
881 
  // Since emitting the mcount call here impacts optimizations such as function
  // inlining, we just add an attribute to insert an mcount call in the
  // backend. The attribute "instrument-function-entry-inlined" is set to the
  // mcount function name, which is architecture dependent.
886   if (CGM.getCodeGenOpts().InstrumentForProfiling) {
887     // Calls to fentry/mcount should not be generated if function has
888     // the no_instrument_function attribute.
889     if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
890       if (CGM.getCodeGenOpts().CallFEntry)
891         Fn->addFnAttr("fentry-call", "true");
892       else {
893         Fn->addFnAttr("instrument-function-entry-inlined",
894                       getTarget().getMCountName());
895       }
896     }
897   }
898 
899   if (RetTy->isVoidType()) {
900     // Void type; nothing to return.
901     ReturnValue = Address::invalid();
902 
903     // Count the implicit return.
904     if (!endsWithReturn(D))
905       ++NumReturnExprs;
906   } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
907     // Indirect return; emit returned value directly into sret slot.
908     // This reduces code size, and affects correctness in C++.
909     auto AI = CurFn->arg_begin();
910     if (CurFnInfo->getReturnInfo().isSRetAfterThis())
911       ++AI;
912     ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
913     if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
914       ReturnValuePointer =
915           CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
916       Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
917                               ReturnValue.getPointer(), Int8PtrTy),
918                           ReturnValuePointer);
919     }
920   } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
921              !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
922     // Load the sret pointer from the argument struct and return into that.
923     unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
924     llvm::Function::arg_iterator EI = CurFn->arg_end();
925     --EI;
926     llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
927     ReturnValuePointer = Address(Addr, getPointerAlign());
928     Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
929     ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
930   } else {
931     ReturnValue = CreateIRTemp(RetTy, "retval");
932 
933     // Tell the epilog emitter to autorelease the result.  We do this
934     // now so that various specialized functions can suppress it
935     // during their IR-generation.
936     if (getLangOpts().ObjCAutoRefCount &&
937         !CurFnInfo->isReturnsRetained() &&
938         RetTy->isObjCRetainableType())
939       AutoreleaseResult = true;
940   }
941 
942   EmitStartEHSpec(CurCodeDecl);
943 
944   PrologueCleanupDepth = EHStack.stable_begin();
945 
946   // Emit OpenMP specific initialization of the device functions.
947   if (getLangOpts().OpenMP && CurCodeDecl)
948     CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
949 
950   EmitFunctionProlog(*CurFnInfo, CurFn, Args);
951 
952   if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
953     CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
954     const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
955     if (MD->getParent()->isLambda() &&
956         MD->getOverloadedOperator() == OO_Call) {
957       // We're in a lambda; figure out the captures.
958       MD->getParent()->getCaptureFields(LambdaCaptureFields,
959                                         LambdaThisCaptureField);
960       if (LambdaThisCaptureField) {
961         // If the lambda captures the object referred to by '*this' - either by
962         // value or by reference, make sure CXXThisValue points to the correct
963         // object.
964 
965         // Get the lvalue for the field (which is a copy of the enclosing object
966         // or contains the address of the enclosing object).
967         LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
968         if (!LambdaThisCaptureField->getType()->isPointerType()) {
969           // If the enclosing object was captured by value, just use its address.
970           CXXThisValue = ThisFieldLValue.getAddress().getPointer();
971         } else {
972           // Load the lvalue pointed to by the field, since '*this' was captured
973           // by reference.
974           CXXThisValue =
975               EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
976         }
977       }
978       for (auto *FD : MD->getParent()->fields()) {
979         if (FD->hasCapturedVLAType()) {
980           auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
981                                            SourceLocation()).getScalarVal();
982           auto VAT = FD->getCapturedVLAType();
983           VLASizeMap[VAT->getSizeExpr()] = ExprArg;
984         }
985       }
986     } else {
987       // Not in a lambda; just use 'this' from the method.
988       // FIXME: Should we generate a new load for each use of 'this'?  The
989       // fast register allocator would be happier...
990       CXXThisValue = CXXABIThisValue;
991     }
992 
993     // Check the 'this' pointer once per function, if it's available.
994     if (CXXABIThisValue) {
995       SanitizerSet SkippedChecks;
996       SkippedChecks.set(SanitizerKind::ObjectSize, true);
997       QualType ThisTy = MD->getThisType();
998 
999       // If this is the call operator of a lambda with no capture-default, it
1000       // may have a static invoker function, which may call this operator with
1001       // a null 'this' pointer.
1002       if (isLambdaCallOperator(MD) &&
1003           MD->getParent()->getLambdaCaptureDefault() == LCD_None)
1004         SkippedChecks.set(SanitizerKind::Null, true);
1005 
1006       EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
1007                                                 : TCK_MemberCall,
1008                     Loc, CXXABIThisValue, ThisTy,
1009                     getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
1010                     SkippedChecks);
1011     }
1012   }
1013 
1014   // If any of the arguments have a variably modified type, make sure to
1015   // emit the type size.
1016   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1017        i != e; ++i) {
1018     const VarDecl *VD = *i;
1019 
1020     // Dig out the type as written from ParmVarDecls; it's unclear whether
1021     // the standard (C99 6.9.1p10) requires this, but we're following the
1022     // precedent set by gcc.
1023     QualType Ty;
1024     if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1025       Ty = PVD->getOriginalType();
1026     else
1027       Ty = VD->getType();
1028 
1029     if (Ty->isVariablyModifiedType())
1030       EmitVariablyModifiedType(Ty);
1031   }
1032   // Emit a location at the end of the prologue.
1033   if (CGDebugInfo *DI = getDebugInfo())
1034     DI->EmitLocation(Builder, StartLoc);
1035 
1036   // TODO: Do we need to handle this in two places like we do with
1037   // target-features/target-cpu?
1038   if (CurFuncDecl)
1039     if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1040       LargestVectorWidth = VecWidth->getVectorWidth();
1041 }
1042 
1043 void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
1044   incrementProfileCounter(Body);
1045   if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
1046     EmitCompoundStmtWithoutScope(*S);
1047   else
1048     EmitStmt(Body);
1049 }
1050 
/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases must not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
1055 void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
1056                                                const Stmt *S) {
1057   llvm::BasicBlock *SkipCountBB = nullptr;
1058   if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
1059     // When instrumenting for profiling, the fallthrough to certain
1060     // statements needs to skip over the instrumentation code so that we
1061     // get an accurate count.
1062     SkipCountBB = createBasicBlock("skipcount");
1063     EmitBranch(SkipCountBB);
1064   }
1065   EmitBlock(BB);
1066   uint64_t CurrentCount = getCurrentProfileCount();
1067   incrementProfileCounter(S);
1068   setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
1069   if (SkipCountBB)
1070     EmitBlock(SkipCountBB);
1071 }
1072 
/// Tries to mark the given function nounwind based on the absence of any
/// throwing calls within it.  We believe this is lightweight enough to do
/// at -O0.
1076 static void TryMarkNoThrow(llvm::Function *F) {
1077   // LLVM treats 'nounwind' on a function as part of the type, so we
1078   // can't do this on functions that can be overwritten.
1079   if (F->isInterposable()) return;
1080 
1081   for (llvm::BasicBlock &BB : *F)
1082     for (llvm::Instruction &I : BB)
1083       if (I.mayThrow())
1084         return;
1085 
1086   F->setDoesNotThrow();
1087 }
1088 
1089 QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1090                                                FunctionArgList &Args) {
1091   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1092   QualType ResTy = FD->getReturnType();
1093 
1094   const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1095   if (MD && MD->isInstance()) {
1096     if (CGM.getCXXABI().HasThisReturn(GD))
1097       ResTy = MD->getThisType();
1098     else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1099       ResTy = CGM.getContext().VoidPtrTy;
1100     CGM.getCXXABI().buildThisParam(*this, Args);
1101   }
1102 
1103   // The base version of an inheriting constructor whose constructed base is a
1104   // virtual base is not passed any arguments (because it doesn't actually call
1105   // the inherited constructor).
1106   bool PassedParams = true;
1107   if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1108     if (auto Inherited = CD->getInheritedConstructor())
1109       PassedParams =
1110           getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1111 
1112   if (PassedParams) {
1113     for (auto *Param : FD->parameters()) {
1114       Args.push_back(Param);
1115       if (!Param->hasAttr<PassObjectSizeAttr>())
1116         continue;
1117 
1118       auto *Implicit = ImplicitParamDecl::Create(
1119           getContext(), Param->getDeclContext(), Param->getLocation(),
1120           /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
1121       SizeArguments[Param] = Implicit;
1122       Args.push_back(Implicit);
1123     }
1124   }
1125 
1126   if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
1127     CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1128 
1129   return ResTy;
1130 }
1131 
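/// Whether it is safe to treat flowing off the end of \p FD as undefined
/// behavior we can optimize on. For example, this returns false for a plain
/// 'int' return type (trivially copyable) and true for a class type with a
/// non-trivial destructor.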
1132 static bool
1133 shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
1134                                              const ASTContext &Context) {
1135   QualType T = FD->getReturnType();
1136   // Avoid the optimization for functions that return a record type with a
1137   // trivial destructor or another trivially copyable type.
1138   if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
1139     if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1140       return !ClassDecl->hasTrivialDestructor();
1141   }
1142   return !T.isTriviallyCopyableType(Context);
1143 }
1144 
1145 void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1146                                    const CGFunctionInfo &FnInfo) {
1147   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1148   CurGD = GD;
1149 
1150   FunctionArgList Args;
1151   QualType ResTy = BuildFunctionArgList(GD, Args);
1152 
1153   // Check if we should generate debug info for this function.
1154   if (FD->hasAttr<NoDebugAttr>())
1155     DebugInfo = nullptr; // disable debug info indefinitely for this function
1156 
1157   // The function might not have a body if we're generating thunks for a
1158   // function declaration.
1159   SourceRange BodyRange;
1160   if (Stmt *Body = FD->getBody())
1161     BodyRange = Body->getSourceRange();
1162   else
1163     BodyRange = FD->getLocation();
1164   CurEHLocation = BodyRange.getEnd();
1165 
1166   // Use the location of the start of the function to determine where
1167   // the function definition is located. By default use the location
1168   // of the declaration as the location for the subprogram. A function
1169   // may lack a declaration in the source code if it is created by code
1170   // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1171   SourceLocation Loc = FD->getLocation();
1172 
1173   // If this is a function specialization then use the pattern body
1174   // as the location for the function.
1175   if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1176     if (SpecDecl->hasBody(SpecDecl))
1177       Loc = SpecDecl->getLocation();
1178 
1179   Stmt *Body = FD->getBody();
1180 
1181   // Initialize helper which will detect jumps which can cause invalid lifetime
1182   // markers.
1183   if (Body && ShouldEmitLifetimeMarkers)
1184     Bypasses.Init(Body);
1185 
1186   // Emit the standard function prologue.
1187   StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1188 
1189   // Generate the body of the function.
1190   PGO.assignRegionCounters(GD, CurFn);
1191   if (isa<CXXDestructorDecl>(FD))
1192     EmitDestructorBody(Args);
1193   else if (isa<CXXConstructorDecl>(FD))
1194     EmitConstructorBody(Args);
1195   else if (getLangOpts().CUDA &&
1196            !getLangOpts().CUDAIsDevice &&
1197            FD->hasAttr<CUDAGlobalAttr>())
1198     CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1199   else if (isa<CXXMethodDecl>(FD) &&
1200            cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1201     // The lambda static invoker function is special, because it forwards or
1202     // clones the body of the function call operator (but is actually static).
1203     EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1204   } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1205              (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1206               cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1207     // Implicit copy-assignment gets the same special treatment as implicit
1208     // copy-constructors.
1209     emitImplicitAssignmentOperatorBody(Args);
1210   } else if (Body) {
1211     EmitFunctionBody(Body);
1212   } else
1213     llvm_unreachable("no definition for emitted function");
1214 
1215   // C++11 [stmt.return]p2:
1216   //   Flowing off the end of a function [...] results in undefined behavior in
1217   //   a value-returning function.
1218   // C11 6.9.1p12:
1219   //   If the '}' that terminates a function is reached, and the value of the
1220   //   function call is used by the caller, the behavior is undefined.
1221   if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1222       !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1223     bool ShouldEmitUnreachable =
1224         CGM.getCodeGenOpts().StrictReturn ||
1225         shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
1226     if (SanOpts.has(SanitizerKind::Return)) {
1227       SanitizerScope SanScope(this);
1228       llvm::Value *IsFalse = Builder.getFalse();
1229       EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
1230                 SanitizerHandler::MissingReturn,
1231                 EmitCheckSourceLocation(FD->getLocation()), None);
1232     } else if (ShouldEmitUnreachable) {
1233       if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1234         EmitTrapCall(llvm::Intrinsic::trap);
1235     }
1236     if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1237       Builder.CreateUnreachable();
1238       Builder.ClearInsertionPoint();
1239     }
1240   }
1241 
1242   // Emit the standard function epilogue.
1243   FinishFunction(BodyRange.getEnd());
1244 
1245   // If we haven't marked the function nothrow through other means, do
1246   // a quick pass now to see if we can.
1247   if (!CurFn->doesNotThrow())
1248     TryMarkNoThrow(CurFn);
1249 }
1250 
/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, the absence of a label means
/// that we can just remove the code.
1254 bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1255   // Null statement, not a label!
1256   if (!S) return false;
1257 
1258   // If this is a label, we have to emit the code, consider something like:
1259   // if (0) {  ...  foo:  bar(); }  goto foo;
1260   //
1261   // TODO: If anyone cared, we could track __label__'s, since we know that you
1262   // can't jump to one from outside their declared region.
1263   if (isa<LabelStmt>(S))
1264     return true;
1265 
1266   // If this is a case/default statement, and we haven't seen a switch, we have
1267   // to emit the code.
1268   if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1269     return true;
1270 
1271   // If this is a switch statement, we want to ignore cases below it.
1272   if (isa<SwitchStmt>(S))
1273     IgnoreCaseStmts = true;
1274 
1275   // Scan subexpressions for verboten labels.
1276   for (const Stmt *SubStmt : S->children())
1277     if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1278       return true;
1279 
1280   return false;
1281 }
1282 
1283 /// containsBreak - Return true if the statement contains a break out of it.
1284 /// If the statement (recursively) contains a switch or loop with a break
1285 /// inside of it, this is fine.
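/// For example, 'if (x) break;' contains an escaping break, while
/// 'while (c) { break; }' does not: that break binds to the while.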
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, nothing to break out of!
  if (!S) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (const Stmt *SubStmt : S->children())
    if (containsBreak(SubStmt))
      return true;

  return false;
}

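/// mightAddDeclToScope - Return true if the statement might introduce a
/// declaration into the enclosing scope: a bare DeclStmt such as "int x = 0;"
/// can, while statements that open their own scope (compound statements,
/// loops, and so on) cannot.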
bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
  if (!S) return false;

  // Some statement kinds add a scope and thus never add a decl to the current
  // scope. Note, this list is longer than the list of statements that might
  // have an unscoped decl nested within them, but this way is conservatively
  // correct even if more statement kinds are added.
  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
      isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
      isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
      isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
    return false;

  if (isa<DeclStmt>(S))
    return true;

  for (const Stmt *SubStmt : S->children())
    if (mightAddDeclToScope(SubStmt))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the boolean result in ResultBool.
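///
/// For example, "sizeof(int) == 4" folds, so this returns true and sets
/// ResultBool; a condition that depends on a runtime value does not fold, so
/// this returns false and leaves ResultBool untouched.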
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool,
                                                   bool AllowLabels) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the folded value in ResultInt.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   llvm::APSInt &ResultInt,
                                                   bool AllowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult Result;
  if (!Cond->EvaluateAsInt(Result, getContext()))
    return false;  // Not foldable, not integer or not fully evaluatable.

  llvm::APSInt Int = Result.Val.getInt();
  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Int;
  return true;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.  Depending on the form of the
/// condition, this may simplify the codegen of the emitted branch.
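///
/// For example, br(X && 1) is emitted as a plain branch on X, and br(!X) is
/// emitted as a branch on X with the true and false destinations swapped.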
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock,
                                           uint64_t TrueCount) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {

    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      // The counter tells us how often we evaluate RHS, and all of TrueCount
      // can be propagated to that branch.
      uint64_t RHSCount = getProfileCount(CondBOp->getRHS());

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
        EmitBlock(LHSTrue);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      // We have the count for entry to the RHS and for the whole expression
      // being true, so we can divvy up TrueCount between the short circuit
      // and the RHS.
      uint64_t LHSCount =
          getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
      uint64_t RHSCount = TrueCount - LHSCount;

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
        EmitBlock(LHSFalse);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);

      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot) {
      // Negate the count.
      uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
      // Negate the condition and swap the destination blocks.
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
                                  FalseCount);
    }
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
                         getProfileCount(CondOp));

    // When computing PGO branch weights, we only know the overall count for
    // the true block. This code is essentially doing tail duplication of the
    // naive code-gen, introducing new edges for which counts are not
    // available. Divide the counts proportionally between the LHS and RHS of
    // the conditional operator.
    uint64_t LHSScaledTrueCount = 0;
    if (TrueCount) {
      double LHSRatio =
          getProfileCount(CondOp) / (double)getCurrentProfileCount();
      LHSScaledTrueCount = TrueCount * LHSRatio;
    }

    cond.begin(*this);
    EmitBlock(LHSBlock);
    incrementProfileCounter(CondOp);
    {
      ApplyDebugLocation DL(*this, Cond);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
                           LHSScaledTrueCount);
    }
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
                         TrueCount - LHSScaledTrueCount);
    cond.end(*this);

    return;
  }

  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }

  // If the branch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the branch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
  llvm::MDNode *Unpredictable = nullptr;
  auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
      llvm::MDBuilder MDHelper(getLLVMContext());
      Unpredictable = MDHelper.createUnpredictable();
    }
  }

  // Create branch weights based on the number of times we get here and the
  // number of times the condition should be true.
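  // Use the max so that the false weight (CurrentCount - TrueCount) cannot
  // wrap around if TrueCount exceeds the recorded region count.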
  uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
  llvm::MDNode *Weights =
      createProfileWeights(TrueCount, CurrentCount - TrueCount);

  // Emit the code with the fully general case.
  llvm::Value *CondV;
  {
    ApplyDebugLocation DL(*this, Cond);
    CondV = EvaluateExprAsBool(Cond);
  }
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
  CGM.ErrorUnsupported(S, Type);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType - the inner-most element type of the array
/// \param dest - the destination address of the VLA
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               Address dest, Address src,
                               llvm::Value *sizeInChars) {
  CGBuilderTy &Builder = CGF.Builder;

  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());

  Address begin =
    Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
  llvm::Value *end =
    Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
  cur->addIncoming(begin.getPointer(), originBB);

  CharUnits curAlign =
    dest.getAlignment().alignmentOfArrayElement(baseSize);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

void
CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (DestPtr.getElementType() != Int8Ty)
    DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);

  // Get size and alignment info for this aggregate.
  CharUnits size = getContext().getTypeSizeInChars(Ty);

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
          dyn_cast_or_null<VariableArrayType>(
                                          getContext().getAsArrayType(Ty))) {
      auto VlaSize = getVLASize(vlaType);
      SizeVal = VlaSize.NumElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(size);
    vla = nullptr;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    CharUnits NullAlign = DestPtr.getAlignment();
    NullVariable->setAlignment(NullAlign.getAsAlign());
    Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
                   NullAlign);

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
}

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (!IndirectBranch)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

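/// GetIndirectGotoBlock - Get or create the single block, shared by every
/// indirect goto in the function, that holds a PHI node of destination
/// addresses feeding one indirectbr instruction.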
llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              Address &addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size.  Note that
  // this is the size of the VLA in bytes, not its size in elements.
  llvm::Value *numVLAElements = nullptr;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;

    // Walk into all VLAs.  This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
  // down to the first element of addr.
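  // For example, for "int a[3][4]", addr has type [3 x [4 x i32]]* and the
  // GEP indices end up as (0, 0, 0), yielding an i32* to the first element.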
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
    dyn_cast<llvm::ArrayType>(addr.getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out of sync");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    llvm::Type *baseType = ConvertType(eltType);
    addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
                                             gepIndices, "array.begin"),
                   addr.getAlignment());
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}

CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

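/// Return the total number of elements in the given (possibly
/// multi-dimensional) VLA, together with its innermost element type; e.g. for
/// "int a[n][m]" this returns { n*m, int }.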
CodeGenFunction::VlaSizePair
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = nullptr;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return { numElements, elementType };
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLAElements1D(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLAElements1D(vla);
}

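/// Return only the outermost dimension of the given VLA, together with its
/// (possibly still variably modified) element type; e.g. for "int a[n][m]"
/// this returns { n, int[m] }.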
CodeGenFunction::VlaSizePair
CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
  assert(VlaSize && "no size for VLA!");
  assert(VlaSize->getType() == SizeTy);
  return { VlaSize, Vla->getElementType() };
}

void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
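  // For example, given "int (*p)[n]", we walk through the pointer type and
  // then evaluate and record the size expression 'n'.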
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCTypeParam:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a value
          //   greater than zero.
          if (SanOpts.has(SanitizerKind::VLABound) &&
              size->getType()->isSignedIntegerType()) {
            SanitizerScope SanScope(this);
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
                EmitCheckSourceLocation(size->getBeginLoc()),
                EmitCheckTypeDescriptor(size->getType())};
            EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
                                     SanitizerKind::VLABound),
                      SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
          }

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
    case Type::PackExpansion:
    case Type::MacroQualified:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;

    case Type::Pipe:
      type = cast<PipeType>(ty)->getElementType();
      break;
    }
  } while (type->isVariablyModifiedType());
}

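// If the target's va_list is an array type (as it is, for example, on x86-64),
// the expression decays to a pointer to its first element; otherwise take the
// address of the va_list lvalue directly.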
Address CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitPointerWithAlignment(E);
  return EmitLValue(E).getAddress();
}

Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
  return EmitLValue(E).getAddress();
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              const APValue &Init) {
  assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}

void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
                                              QualType Ty, SourceLocation Loc,
                                              SourceLocation AssumptionLoc,
                                              llvm::Value *Alignment,
                                              llvm::Value *OffsetValue) {
  llvm::Value *TheCheck;
  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
  if (SanOpts.has(SanitizerKind::Alignment)) {
    EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
                                 OffsetValue, TheCheck, Assumption);
  }
}

void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
                                              const Expr *E,
                                              SourceLocation AssumptionLoc,
                                              llvm::Value *Alignment,
                                              llvm::Value *OffsetValue) {
  if (auto *CE = dyn_cast<CastExpr>(E))
    E = CE->getSubExprAsWritten();
  QualType Ty = E->getType();
  SourceLocation Loc = E->getExprLoc();

  EmitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
                          OffsetValue);
}

llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
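  // The annotation intrinsics all take the same trailing arguments: the
  // annotated value, the annotation string, the source file name, and the
  // line number.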
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       I->getAnnotation(), D->getLocation());
}

Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                              Address Addr) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Value *V = Addr.getPointer();
  llvm::Type *VTy = V->getType();
  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return Address(V, Addr.getAlignment());
}

CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }

CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
    : CGF(CGF) {
  assert(!CGF->IsSanitizerScope);
  CGF->IsSanitizerScope = true;
}

CodeGenFunction::SanitizerScope::~SanitizerScope() {
  CGF->IsSanitizerScope = false;
}

void CodeGenFunction::InsertHelper(llvm::Instruction *I,
                                   const llvm::Twine &Name,
                                   llvm::BasicBlock *BB,
                                   llvm::BasicBlock::iterator InsertPt) const {
  LoopStack.InsertHelper(I);
  if (IsSanitizerScope)
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
}

void CGBuilderInserter::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const {
  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
  if (CGF)
    CGF->InsertHelper(I, Name, BB, InsertPt);
}

static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
                                CodeGenModule &CGM, const FunctionDecl *FD,
                                std::string &FirstMissing) {
  // If there aren't any required features listed, then return false.
  if (ReqFeatures.empty())
    return false;

  // Now build up the set of caller features and verify that all the required
  // features are there.
  llvm::StringMap<bool> CallerFeatureMap;
  CGM.getFunctionFeatureMap(CallerFeatureMap, GlobalDecl().getWithDecl(FD));

  // Every required feature must be present in the caller's feature map; a
  // single requirement may list '|'-separated alternatives, any one of which
  // satisfies it.
  return std::all_of(
      ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
        SmallVector<StringRef, 1> OrFeatures;
        Feature.split(OrFeatures, '|');
        return llvm::any_of(OrFeatures, [&](StringRef Feature) {
          if (!CallerFeatureMap.lookup(Feature)) {
            FirstMissing = Feature.str();
            return false;
          }
          return true;
        });
      });
}

// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
                                          const FunctionDecl *TargetDecl) {
  return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
}

// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
                                          const FunctionDecl *TargetDecl) {
  // Early exit if this is an indirect call.
  if (!TargetDecl)
    return;

  // Get the current enclosing function if it exists. If it doesn't,
  // we can't check the target features anyhow.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
  if (!FD)
    return;

  // Grab the required features for the call. For a builtin this is listed in
  // the td file with the default cpu; for an always_inline function this is
  // any listed cpu and any listed features.
  unsigned BuiltinID = TargetDecl->getBuiltinID();
  std::string MissingFeature;
  if (BuiltinID) {
    SmallVector<StringRef, 1> ReqFeatures;
    const char *FeatureList =
        CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
    // Return if the builtin doesn't have any required features.
    if (!FeatureList || StringRef(FeatureList) == "")
      return;
    StringRef(FeatureList).split(ReqFeatures, ',');
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
          << TargetDecl->getDeclName()
          << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);

  } else if (TargetDecl->hasAttr<TargetAttr>() ||
             TargetDecl->hasAttr<CPUSpecificAttr>()) {
    // Get the required features for the callee.

    const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
    TargetAttr::ParsedTargetAttr ParsedAttr = CGM.filterFunctionTargetAttrs(TD);

    SmallVector<StringRef, 1> ReqFeatures;
    llvm::StringMap<bool> CalleeFeatureMap;
    CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);

    for (const auto &F : ParsedAttr.Features) {
      if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
        ReqFeatures.push_back(StringRef(F).substr(1));
    }

    for (const auto &F : CalleeFeatureMap) {
      // Only positive features are "required".
      if (F.getValue())
        ReqFeatures.push_back(F.getKey());
    }
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
          << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
  }
}

void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
  if (!CGM.getCodeGenOpts().SanitizeStats)
    return;

  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
  CGM.getSanStats().create(IRB, SSK);
}

llvm::Value *
CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
  llvm::Value *Condition = nullptr;

  if (!RO.Conditions.Architecture.empty())
    Condition = EmitX86CpuIs(RO.Conditions.Architecture);

  if (!RO.Conditions.Features.empty()) {
    llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
    Condition =
        Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
  }
  return Condition;
}

static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
                                             llvm::Function *Resolver,
                                             CGBuilderTy &Builder,
                                             llvm::Function *FuncToReturn,
                                             bool SupportsIFunc) {
  if (SupportsIFunc) {
    Builder.CreateRet(FuncToReturn);
    return;
  }

  llvm::SmallVector<llvm::Value *, 10> Args;
  llvm::for_each(Resolver->args(),
                 [&](llvm::Argument &Arg) { Args.push_back(&Arg); });

  llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
  Result->setTailCallKind(llvm::CallInst::TCK_MustTail);

  if (Resolver->getReturnType()->isVoidTy())
    Builder.CreateRetVoid();
  else
    Builder.CreateRet(Result);
}

void CodeGenFunction::EmitMultiVersionResolver(
    llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
  assert((getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86 ||
          getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86_64) &&
         "Only implemented for x86 targets");

  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();

  // Main function's basic block.
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
  Builder.SetInsertPoint(CurBlock);
  EmitX86CpuInit();

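  // Emit a chain of conditional branches, one block per option:
  //   resolver_entry: br %cond1, resolver_return, resolver_else
  //   resolver_else:  br %cond2, resolver_return1, resolver_else1
  //   ...
  // terminated either by the default/generic version or by a trap.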
  for (const MultiVersionResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);
    llvm::Value *Condition = FormResolverCondition(RO);

    // The 'default' or 'generic' case.
    if (!Condition) {
      assert(&RO == Options.end() - 1 &&
             "Default or Generic case must be last");
      CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
                                       SupportsIFunc);
      return;
    }

    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
    CGBuilderTy RetBuilder(*this, RetBlock);
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
                                     SupportsIFunc);
    CurBlock = createBasicBlock("resolver_else", Resolver);
    Builder.CreateCondBr(Condition, RetBlock, CurBlock);
  }

  // If no generic/default, emit an unreachable.
  Builder.SetInsertPoint(CurBlock);
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();
  Builder.ClearInsertionPoint();
}

// Loc - where the diagnostic will point: the place in the source code where
//  the alignment assumption failed.
// SecondaryLoc - if present (it will be, when sufficiently different from
//  Loc), the diagnostic will additionally point a "Note:" to this location;
//  it should be where the __attribute__((assume_aligned)) itself was
//  written, for example.
void CodeGenFunction::EmitAlignmentAssumptionCheck(
    llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
    SourceLocation SecondaryLoc, llvm::Value *Alignment,
    llvm::Value *OffsetValue, llvm::Value *TheCheck,
    llvm::Instruction *Assumption) {
  assert(Assumption && isa<llvm::CallInst>(Assumption) &&
         cast<llvm::CallInst>(Assumption)->getCalledValue() ==
             llvm::Intrinsic::getDeclaration(
                 Builder.GetInsertBlock()->getParent()->getParent(),
                 llvm::Intrinsic::assume) &&
         "Assumption should be a call to llvm.assume().");
  assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
         "Assumption should be the last instruction of the basic block, "
         "since the basic block is still being generated.");

  if (!SanOpts.has(SanitizerKind::Alignment))
    return;

  // Don't check pointers to volatile data. The behavior here is implementation-
  // defined.
  if (Ty->getPointeeType().isVolatileQualified())
    return;

  // We need to temporarily remove the assumption so we can insert the
  // sanitizer check before it; otherwise the check will be dropped by
  // optimizations.
  Assumption->removeFromParent();

  {
    SanitizerScope SanScope(this);

    if (!OffsetValue)
      OffsetValue = Builder.getInt1(0); // no offset.

    llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
                                    EmitCheckSourceLocation(SecondaryLoc),
                                    EmitCheckTypeDescriptor(Ty)};
    llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
                                  EmitCheckValue(Alignment),
                                  EmitCheckValue(OffsetValue)};
    EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
              SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
  }

  // We are now in the (new, empty) "cont" basic block.
  // Reintroduce the assumption.
  Builder.Insert(Assumption);
  // FIXME: Assumption still has its original basic block as its Parent.
}

llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
  if (CGDebugInfo *DI = getDebugInfo())
    return DI->SourceLocToDebugLoc(Location);

  return llvm::DebugLoc();
}