1 //===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This coordinates the per-function state used while generating code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CodeGenFunction.h"
14 #include "CGBlocks.h"
15 #include "CGCleanup.h"
16 #include "CGCUDARuntime.h"
17 #include "CGCXXABI.h"
18 #include "CGDebugInfo.h"
19 #include "CGOpenMPRuntime.h"
20 #include "CodeGenModule.h"
21 #include "CodeGenPGO.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/ASTContext.h"
24 #include "clang/AST/ASTLambda.h"
25 #include "clang/AST/Decl.h"
26 #include "clang/AST/DeclCXX.h"
27 #include "clang/AST/StmtCXX.h"
28 #include "clang/AST/StmtObjC.h"
29 #include "clang/Basic/Builtins.h"
30 #include "clang/Basic/CodeGenOptions.h"
31 #include "clang/Basic/TargetInfo.h"
32 #include "clang/CodeGen/CGFunctionInfo.h"
33 #include "clang/Frontend/FrontendDiagnostic.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/Dominators.h"
36 #include "llvm/IR/Intrinsics.h"
37 #include "llvm/IR/MDBuilder.h"
38 #include "llvm/IR/Operator.h"
39 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
40 using namespace clang;
41 using namespace CodeGen;
42 
43 /// shouldEmitLifetimeMarkers - Decide whether we need emit the life-time
44 /// markers.
45 static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
46                                       const LangOptions &LangOpts) {
47   if (CGOpts.DisableLifetimeMarkers)
48     return false;
49 
50   // Sanitizers may use markers.
51   if (CGOpts.SanitizeAddressUseAfterScope ||
52       LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
53       LangOpts.Sanitize.has(SanitizerKind::Memory))
54     return true;
55 
56   // For now, only in optimized builds.
57   return CGOpts.OptimizationLevel != 0;
58 }
59 
60 CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
61     : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
62       Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
63               CGBuilderInserterTy(this)),
64       SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
65       PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
66                     CGM.getCodeGenOpts(), CGM.getLangOpts())) {
67   if (!suppressNewContext)
68     CGM.getCXXABI().getMangleContext().startNewFunction();
69 
70   llvm::FastMathFlags FMF;
71   if (CGM.getLangOpts().FastMath)
72     FMF.setFast();
73   if (CGM.getLangOpts().FiniteMathOnly) {
74     FMF.setNoNaNs();
75     FMF.setNoInfs();
76   }
77   if (CGM.getCodeGenOpts().NoNaNsFPMath) {
78     FMF.setNoNaNs();
79   }
80   if (CGM.getCodeGenOpts().NoSignedZeros) {
81     FMF.setNoSignedZeros();
82   }
83   if (CGM.getCodeGenOpts().ReciprocalMath) {
84     FMF.setAllowReciprocal();
85   }
86   if (CGM.getCodeGenOpts().Reassociate) {
87     FMF.setAllowReassoc();
88   }
89   Builder.setFastMathFlags(FMF);
90 }
91 
92 CodeGenFunction::~CodeGenFunction() {
93   assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
94 
95   // If there are any unclaimed block infos, go ahead and destroy them
96   // now.  This can happen if IR-gen gets clever and skips evaluating
97   // something.
98   if (FirstBlockInfo)
99     destroyBlockInfos(FirstBlockInfo);
100 
101   if (getLangOpts().OpenMP && CurFn)
102     CGM.getOpenMPRuntime().functionFinished(*this);
103 }
104 
105 CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
106                                                     LValueBaseInfo *BaseInfo,
107                                                     TBAAAccessInfo *TBAAInfo) {
108   return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
109                                  /* forPointeeType= */ true);
110 }
111 
112 CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
113                                                    LValueBaseInfo *BaseInfo,
114                                                    TBAAAccessInfo *TBAAInfo,
115                                                    bool forPointeeType) {
116   if (TBAAInfo)
117     *TBAAInfo = CGM.getTBAAAccessInfo(T);
118 
119   // Honor alignment typedef attributes even on incomplete types.
120   // We also honor them straight for C++ class types, even as pointees;
121   // there's an expressivity gap here.
122   if (auto TT = T->getAs<TypedefType>()) {
123     if (auto Align = TT->getDecl()->getMaxAlignment()) {
124       if (BaseInfo)
125         *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
126       return getContext().toCharUnitsFromBits(Align);
127     }
128   }
129 
130   if (BaseInfo)
131     *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
132 
133   CharUnits Alignment;
134   if (T->isIncompleteType()) {
135     Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
136   } else {
137     // For C++ class pointees, we don't know whether we're pointing at a
138     // base or a complete object, so we generally need to use the
139     // non-virtual alignment.
140     const CXXRecordDecl *RD;
141     if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
142       Alignment = CGM.getClassPointerAlignment(RD);
143     } else {
144       Alignment = getContext().getTypeAlignInChars(T);
145       if (T.getQualifiers().hasUnaligned())
146         Alignment = CharUnits::One();
147     }
148 
149     // Cap to the global maximum type alignment unless the alignment
150     // was somehow explicit on the type.
151     if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
152       if (Alignment.getQuantity() > MaxAlign &&
153           !getContext().isAlignmentRequired(T))
154         Alignment = CharUnits::fromQuantity(MaxAlign);
155     }
156   }
157   return Alignment;
158 }
159 
160 LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
161   LValueBaseInfo BaseInfo;
162   TBAAAccessInfo TBAAInfo;
163   CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
164   return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
165                           TBAAInfo);
166 }
167 
168 /// Given a value of type T* that may not be to a complete object,
169 /// construct an l-value with the natural pointee alignment of T.
170 LValue
171 CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
172   LValueBaseInfo BaseInfo;
173   TBAAAccessInfo TBAAInfo;
174   CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
175                                             /* forPointeeType= */ true);
176   return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
177 }
178 
179 
180 llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
181   return CGM.getTypes().ConvertTypeForMem(T);
182 }
183 
184 llvm::Type *CodeGenFunction::ConvertType(QualType T) {
185   return CGM.getTypes().ConvertType(T);
186 }
187 
188 TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
189   type = type.getCanonicalType();
190   while (true) {
191     switch (type->getTypeClass()) {
192 #define TYPE(name, parent)
193 #define ABSTRACT_TYPE(name, parent)
194 #define NON_CANONICAL_TYPE(name, parent) case Type::name:
195 #define DEPENDENT_TYPE(name, parent) case Type::name:
196 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
197 #include "clang/AST/TypeNodes.inc"
198       llvm_unreachable("non-canonical or dependent type in IR-generation");
199 
200     case Type::Auto:
201     case Type::DeducedTemplateSpecialization:
202       llvm_unreachable("undeduced type in IR-generation");
203 
204     // Various scalar types.
205     case Type::Builtin:
206     case Type::Pointer:
207     case Type::BlockPointer:
208     case Type::LValueReference:
209     case Type::RValueReference:
210     case Type::MemberPointer:
211     case Type::Vector:
212     case Type::ExtVector:
213     case Type::FunctionProto:
214     case Type::FunctionNoProto:
215     case Type::Enum:
216     case Type::ObjCObjectPointer:
217     case Type::Pipe:
218       return TEK_Scalar;
219 
220     // Complexes.
221     case Type::Complex:
222       return TEK_Complex;
223 
224     // Arrays, records, and Objective-C objects.
225     case Type::ConstantArray:
226     case Type::IncompleteArray:
227     case Type::VariableArray:
228     case Type::Record:
229     case Type::ObjCObject:
230     case Type::ObjCInterface:
231       return TEK_Aggregate;
232 
233     // We operate on atomic values according to their underlying type.
234     case Type::Atomic:
235       type = cast<AtomicType>(type)->getValueType();
236       continue;
237     }
238     llvm_unreachable("unknown type kind!");
239   }
240 }
241 
242 llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
243   // For cleanliness, we try to avoid emitting the return block for
244   // simple cases.
245   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
246 
247   if (CurBB) {
248     assert(!CurBB->getTerminator() && "Unexpected terminated block.");
249 
250     // We have a valid insert point, reuse it if it is empty or there are no
251     // explicit jumps to the return block.
252     if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
253       ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
254       delete ReturnBlock.getBlock();
255       ReturnBlock = JumpDest();
256     } else
257       EmitBlock(ReturnBlock.getBlock());
258     return llvm::DebugLoc();
259   }
260 
261   // Otherwise, if the return block is the target of a single direct
262   // branch then we can just put the code in that block instead. This
263   // cleans up functions which started with a unified return block.
264   if (ReturnBlock.getBlock()->hasOneUse()) {
265     llvm::BranchInst *BI =
266       dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
267     if (BI && BI->isUnconditional() &&
268         BI->getSuccessor(0) == ReturnBlock.getBlock()) {
269       // Record/return the DebugLoc of the simple 'return' expression to be used
270       // later by the actual 'ret' instruction.
271       llvm::DebugLoc Loc = BI->getDebugLoc();
272       Builder.SetInsertPoint(BI->getParent());
273       BI->eraseFromParent();
274       delete ReturnBlock.getBlock();
275       ReturnBlock = JumpDest();
276       return Loc;
277     }
278   }
279 
280   // FIXME: We are at an unreachable point, there is no reason to emit the block
281   // unless it has uses. However, we still need a place to put the debug
282   // region.end for now.
283 
284   EmitBlock(ReturnBlock.getBlock());
285   return llvm::DebugLoc();
286 }
287 
288 static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
289   if (!BB) return;
290   if (!BB->use_empty())
291     return CGF.CurFn->getBasicBlockList().push_back(BB);
292   delete BB;
293 }
294 
295 void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
296   assert(BreakContinueStack.empty() &&
297          "mismatched push/pop in break/continue stack!");
298 
299   bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
300     && NumSimpleReturnExprs == NumReturnExprs
301     && ReturnBlock.getBlock()->use_empty();
302   // Usually the return expression is evaluated before the cleanup
303   // code.  If the function contains only a simple return statement,
304   // such as a constant, the location before the cleanup code becomes
305   // the last useful breakpoint in the function, because the simple
306   // return expression will be evaluated after the cleanup code. To be
307   // safe, set the debug location for cleanup code to the location of
308   // the return statement.  Otherwise the cleanup code should be at the
309   // end of the function's lexical scope.
310   //
311   // If there are multiple branches to the return block, the branch
312   // instructions will get the location of the return statements and
313   // all will be fine.
314   if (CGDebugInfo *DI = getDebugInfo()) {
315     if (OnlySimpleReturnStmts)
316       DI->EmitLocation(Builder, LastStopPoint);
317     else
318       DI->EmitLocation(Builder, EndLoc);
319   }
320 
321   // Pop any cleanups that might have been associated with the
322   // parameters.  Do this in whatever block we're currently in; it's
323   // important to do this before we enter the return block or return
324   // edges will be *really* confused.
325   bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
326   bool HasOnlyLifetimeMarkers =
327       HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
328   bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
329   if (HasCleanups) {
330     // Make sure the line table doesn't jump back into the body for
331     // the ret after it's been at EndLoc.
332     if (CGDebugInfo *DI = getDebugInfo())
333       if (OnlySimpleReturnStmts)
334         DI->EmitLocation(Builder, EndLoc);
335 
336     PopCleanupBlocks(PrologueCleanupDepth);
337   }
338 
339   // Emit function epilog (to return).
340   llvm::DebugLoc Loc = EmitReturnBlock();
341 
342   if (ShouldInstrumentFunction()) {
343     if (CGM.getCodeGenOpts().InstrumentFunctions)
344       CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
345     if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
346       CurFn->addFnAttr("instrument-function-exit-inlined",
347                        "__cyg_profile_func_exit");
348   }
349 
350   // Emit debug descriptor for function end.
351   if (CGDebugInfo *DI = getDebugInfo())
352     DI->EmitFunctionEnd(Builder, CurFn);
353 
354   // Reset the debug location to that of the simple 'return' expression, if any
355   // rather than that of the end of the function's scope '}'.
356   ApplyDebugLocation AL(*this, Loc);
357   EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
358   EmitEndEHSpec(CurCodeDecl);
359 
360   assert(EHStack.empty() &&
361          "did not remove all scopes from cleanup stack!");
362 
363   // If someone did an indirect goto, emit the indirect goto block at the end of
364   // the function.
365   if (IndirectBranch) {
366     EmitBlock(IndirectBranch->getParent());
367     Builder.ClearInsertionPoint();
368   }
369 
370   // If some of our locals escaped, insert a call to llvm.localescape in the
371   // entry block.
372   if (!EscapedLocals.empty()) {
373     // Invert the map from local to index into a simple vector. There should be
374     // no holes.
375     SmallVector<llvm::Value *, 4> EscapeArgs;
376     EscapeArgs.resize(EscapedLocals.size());
377     for (auto &Pair : EscapedLocals)
378       EscapeArgs[Pair.second] = Pair.first;
379     llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
380         &CGM.getModule(), llvm::Intrinsic::localescape);
381     CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
382   }
383 
384   // Remove the AllocaInsertPt instruction, which is just a convenience for us.
385   llvm::Instruction *Ptr = AllocaInsertPt;
386   AllocaInsertPt = nullptr;
387   Ptr->eraseFromParent();
388 
389   // If someone took the address of a label but never did an indirect goto, we
390   // made a zero entry PHI node, which is illegal, zap it now.
391   if (IndirectBranch) {
392     llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
393     if (PN->getNumIncomingValues() == 0) {
394       PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
395       PN->eraseFromParent();
396     }
397   }
398 
399   EmitIfUsed(*this, EHResumeBlock);
400   EmitIfUsed(*this, TerminateLandingPad);
401   EmitIfUsed(*this, TerminateHandler);
402   EmitIfUsed(*this, UnreachableBlock);
403 
404   for (const auto &FuncletAndParent : TerminateFunclets)
405     EmitIfUsed(*this, FuncletAndParent.second);
406 
407   if (CGM.getCodeGenOpts().EmitDeclMetadata)
408     EmitDeclMetadata();
409 
410   for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
411            I = DeferredReplacements.begin(),
412            E = DeferredReplacements.end();
413        I != E; ++I) {
414     I->first->replaceAllUsesWith(I->second);
415     I->first->eraseFromParent();
416   }
417 
418   // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
419   // PHIs if the current function is a coroutine. We don't do it for all
420   // functions as it may result in slight increase in numbers of instructions
421   // if compiled with no optimizations. We do it for coroutine as the lifetime
422   // of CleanupDestSlot alloca make correct coroutine frame building very
423   // difficult.
424   if (NormalCleanupDest.isValid() && isCoroutine()) {
425     llvm::DominatorTree DT(*CurFn);
426     llvm::PromoteMemToReg(
427         cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
428     NormalCleanupDest = Address::invalid();
429   }
430 
431   // Scan function arguments for vector width.
432   for (llvm::Argument &A : CurFn->args())
433     if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
434       LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
435                                    VT->getPrimitiveSizeInBits().getFixedSize());
436 
437   // Update vector width based on return type.
438   if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
439     LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
440                                   VT->getPrimitiveSizeInBits().getFixedSize());
441 
442   // Add the required-vector-width attribute. This contains the max width from:
443   // 1. min-vector-width attribute used in the source program.
444   // 2. Any builtins used that have a vector width specified.
445   // 3. Values passed in and out of inline assembly.
446   // 4. Width of vector arguments and return types for this function.
447   // 5. Width of vector aguments and return types for functions called by this
448   //    function.
449   CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
450 
451   // If we generated an unreachable return block, delete it now.
452   if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
453     Builder.ClearInsertionPoint();
454     ReturnBlock.getBlock()->eraseFromParent();
455   }
456   if (ReturnValue.isValid()) {
457     auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
458     if (RetAlloca && RetAlloca->use_empty()) {
459       RetAlloca->eraseFromParent();
460       ReturnValue = Address::invalid();
461     }
462   }
463 }
464 
465 /// ShouldInstrumentFunction - Return true if the current function should be
466 /// instrumented with __cyg_profile_func_* calls
467 bool CodeGenFunction::ShouldInstrumentFunction() {
468   if (!CGM.getCodeGenOpts().InstrumentFunctions &&
469       !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
470       !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
471     return false;
472   if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
473     return false;
474   return true;
475 }
476 
477 /// ShouldXRayInstrument - Return true if the current function should be
478 /// instrumented with XRay nop sleds.
479 bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
480   return CGM.getCodeGenOpts().XRayInstrumentFunctions;
481 }
482 
483 /// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
484 /// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
485 bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
486   return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
487          (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
488           CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
489               XRayInstrKind::Custom);
490 }
491 
492 bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
493   return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
494          (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
495           CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
496               XRayInstrKind::Typed);
497 }
498 
499 llvm::Constant *
500 CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
501                                             llvm::Constant *Addr) {
502   // Addresses stored in prologue data can't require run-time fixups and must
503   // be PC-relative. Run-time fixups are undesirable because they necessitate
504   // writable text segments, which are unsafe. And absolute addresses are
505   // undesirable because they break PIE mode.
506 
507   // Add a layer of indirection through a private global. Taking its address
508   // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
509   auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
510                                       /*isConstant=*/true,
511                                       llvm::GlobalValue::PrivateLinkage, Addr);
512 
513   // Create a PC-relative address.
514   auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
515   auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
516   auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
517   return (IntPtrTy == Int32Ty)
518              ? PCRelAsInt
519              : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
520 }
521 
522 llvm::Value *
523 CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
524                                           llvm::Value *EncodedAddr) {
525   // Reconstruct the address of the global.
526   auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
527   auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
528   auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
529   auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");
530 
531   // Load the original pointer through the global.
532   return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
533                             "decoded_addr");
534 }
535 
536 void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
537                                                llvm::Function *Fn)
538 {
539   if (!FD->hasAttr<OpenCLKernelAttr>())
540     return;
541 
542   llvm::LLVMContext &Context = getLLVMContext();
543 
544   CGM.GenOpenCLArgMetadata(Fn, FD, this);
545 
546   if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
547     QualType HintQTy = A->getTypeHint();
548     const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
549     bool IsSignedInteger =
550         HintQTy->isSignedIntegerType() ||
551         (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
552     llvm::Metadata *AttrMDArgs[] = {
553         llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
554             CGM.getTypes().ConvertType(A->getTypeHint()))),
555         llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
556             llvm::IntegerType::get(Context, 32),
557             llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
558     Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
559   }
560 
561   if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
562     llvm::Metadata *AttrMDArgs[] = {
563         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
564         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
565         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
566     Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
567   }
568 
569   if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
570     llvm::Metadata *AttrMDArgs[] = {
571         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
572         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
573         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
574     Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
575   }
576 
577   if (const OpenCLIntelReqdSubGroupSizeAttr *A =
578           FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
579     llvm::Metadata *AttrMDArgs[] = {
580         llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
581     Fn->setMetadata("intel_reqd_sub_group_size",
582                     llvm::MDNode::get(Context, AttrMDArgs));
583   }
584 }
585 
586 /// Determine whether the function F ends with a return stmt.
587 static bool endsWithReturn(const Decl* F) {
588   const Stmt *Body = nullptr;
589   if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
590     Body = FD->getBody();
591   else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
592     Body = OMD->getBody();
593 
594   if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
595     auto LastStmt = CS->body_rbegin();
596     if (LastStmt != CS->body_rend())
597       return isa<ReturnStmt>(*LastStmt);
598   }
599   return false;
600 }
601 
602 void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
603   if (SanOpts.has(SanitizerKind::Thread)) {
604     Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
605     Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
606   }
607 }
608 
609 static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
610   auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
611   if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
612       !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
613       (MD->getNumParams() != 1 && MD->getNumParams() != 2))
614     return false;
615 
616   if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
617     return false;
618 
619   if (MD->getNumParams() == 2) {
620     auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
621     if (!PT || !PT->isVoidPointerType() ||
622         !PT->getPointeeType().isConstQualified())
623       return false;
624   }
625 
626   return true;
627 }
628 
629 /// Return the UBSan prologue signature for \p FD if one is available.
630 static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
631                                             const FunctionDecl *FD) {
632   if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
633     if (!MD->isStatic())
634       return nullptr;
635   return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
636 }
637 
638 void CodeGenFunction::StartFunction(GlobalDecl GD,
639                                     QualType RetTy,
640                                     llvm::Function *Fn,
641                                     const CGFunctionInfo &FnInfo,
642                                     const FunctionArgList &Args,
643                                     SourceLocation Loc,
644                                     SourceLocation StartLoc) {
645   assert(!CurFn &&
646          "Do not use a CodeGenFunction object for more than one function");
647 
648   const Decl *D = GD.getDecl();
649 
650   DidCallStackSave = false;
651   CurCodeDecl = D;
652   if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
653     if (FD->usesSEHTry())
654       CurSEHParent = FD;
655   CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
656   FnRetTy = RetTy;
657   CurFn = Fn;
658   CurFnInfo = &FnInfo;
659   assert(CurFn->isDeclaration() && "Function already has body?");
660 
661   // If this function has been blacklisted for any of the enabled sanitizers,
662   // disable the sanitizer for the function.
663   do {
664 #define SANITIZER(NAME, ID)                                                    \
665   if (SanOpts.empty())                                                         \
666     break;                                                                     \
667   if (SanOpts.has(SanitizerKind::ID))                                          \
668     if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc))                \
669       SanOpts.set(SanitizerKind::ID, false);
670 
671 #include "clang/Basic/Sanitizers.def"
672 #undef SANITIZER
673   } while (0);
674 
675   if (D) {
676     // Apply the no_sanitize* attributes to SanOpts.
677     for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
678       SanitizerMask mask = Attr->getMask();
679       SanOpts.Mask &= ~mask;
680       if (mask & SanitizerKind::Address)
681         SanOpts.set(SanitizerKind::KernelAddress, false);
682       if (mask & SanitizerKind::KernelAddress)
683         SanOpts.set(SanitizerKind::Address, false);
684       if (mask & SanitizerKind::HWAddress)
685         SanOpts.set(SanitizerKind::KernelHWAddress, false);
686       if (mask & SanitizerKind::KernelHWAddress)
687         SanOpts.set(SanitizerKind::HWAddress, false);
688     }
689   }
690 
691   // Apply sanitizer attributes to the function.
692   if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
693     Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
694   if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
695     Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
696   if (SanOpts.has(SanitizerKind::MemTag))
697     Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
698   if (SanOpts.has(SanitizerKind::Thread))
699     Fn->addFnAttr(llvm::Attribute::SanitizeThread);
700   if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
701     Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
702   if (SanOpts.has(SanitizerKind::SafeStack))
703     Fn->addFnAttr(llvm::Attribute::SafeStack);
704   if (SanOpts.has(SanitizerKind::ShadowCallStack))
705     Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
706 
707   // Apply fuzzing attribute to the function.
708   if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
709     Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
710 
711   // Ignore TSan memory acesses from within ObjC/ObjC++ dealloc, initialize,
712   // .cxx_destruct, __destroy_helper_block_ and all of their calees at run time.
713   if (SanOpts.has(SanitizerKind::Thread)) {
714     if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
715       IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
716       if (OMD->getMethodFamily() == OMF_dealloc ||
717           OMD->getMethodFamily() == OMF_initialize ||
718           (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
719         markAsIgnoreThreadCheckingAtRuntime(Fn);
720       }
721     }
722   }
723 
724   // Ignore unrelated casts in STL allocate() since the allocator must cast
725   // from void* to T* before object initialization completes. Don't match on the
726   // namespace because not all allocators are in std::
727   if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
728     if (matchesStlAllocatorFn(D, getContext()))
729       SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
730   }
731 
732   // Ignore null checks in coroutine functions since the coroutines passes
733   // are not aware of how to move the extra UBSan instructions across the split
734   // coroutine boundaries.
735   if (D && SanOpts.has(SanitizerKind::Null))
736     if (const auto *FD = dyn_cast<FunctionDecl>(D))
737       if (FD->getBody() &&
738           FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
739         SanOpts.Mask &= ~SanitizerKind::Null;
740 
741   // Apply xray attributes to the function (as a string, for now)
742   if (D) {
743     if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
744       if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
745               XRayInstrKind::Function)) {
746         if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
747           Fn->addFnAttr("function-instrument", "xray-always");
748         if (XRayAttr->neverXRayInstrument())
749           Fn->addFnAttr("function-instrument", "xray-never");
750         if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
751           if (ShouldXRayInstrumentFunction())
752             Fn->addFnAttr("xray-log-args",
753                           llvm::utostr(LogArgs->getArgumentCount()));
754       }
755     } else {
756       if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
757         Fn->addFnAttr(
758             "xray-instruction-threshold",
759             llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
760     }
761   }
762 
763   // Add no-jump-tables value.
764   Fn->addFnAttr("no-jump-tables",
765                 llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));
766 
767   // Add no-inline-line-tables value.
768   if (CGM.getCodeGenOpts().NoInlineLineTables)
769     Fn->addFnAttr("no-inline-line-tables");
770 
771   // Add profile-sample-accurate value.
772   if (CGM.getCodeGenOpts().ProfileSampleAccurate)
773     Fn->addFnAttr("profile-sample-accurate");
774 
775   if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
776     Fn->addFnAttr("cfi-canonical-jump-table");
777 
778   if (getLangOpts().OpenCL) {
779     // Add metadata for a kernel function.
780     if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
781       EmitOpenCLKernelMetadata(FD, Fn);
782   }
783 
784   // If we are checking function types, emit a function type signature as
785   // prologue data.
786   if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
787     if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
788       if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
789         // Remove any (C++17) exception specifications, to allow calling e.g. a
790         // noexcept function through a non-noexcept pointer.
791         auto ProtoTy =
792           getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
793                                                         EST_None);
794         llvm::Constant *FTRTTIConst =
795             CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
796         llvm::Constant *FTRTTIConstEncoded =
797             EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
798         llvm::Constant *PrologueStructElems[] = {PrologueSig,
799                                                  FTRTTIConstEncoded};
800         llvm::Constant *PrologueStructConst =
801             llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
802         Fn->setPrologueData(PrologueStructConst);
803       }
804     }
805   }
806 
807   // If we're checking nullability, we need to know whether we can check the
808   // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
809   if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
810     auto Nullability = FnRetTy->getNullability(getContext());
811     if (Nullability && *Nullability == NullabilityKind::NonNull) {
812       if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
813             CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
814         RetValNullabilityPrecondition =
815             llvm::ConstantInt::getTrue(getLLVMContext());
816     }
817   }
818 
819   // If we're in C++ mode and the function name is "main", it is guaranteed
820   // to be norecurse by the standard (3.6.1.3 "The function main shall not be
821   // used within a program").
822   if (getLangOpts().CPlusPlus)
823     if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
824       if (FD->isMain())
825         Fn->addFnAttr(llvm::Attribute::NoRecurse);
826 
827   // If a custom alignment is used, force realigning to this alignment on
828   // any main function which certainly will need it.
829   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
830     if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
831         CGM.getCodeGenOpts().StackAlignment)
832       Fn->addFnAttr("stackrealign");
833 
834   llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
835 
836   // Create a marker to make it easy to insert allocas into the entryblock
837   // later.  Don't create this with the builder, because we don't want it
838   // folded.
839   llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
840   AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
841 
842   ReturnBlock = getJumpDestInCurrentScope("return");
843 
844   Builder.SetInsertPoint(EntryBB);
845 
846   // If we're checking the return value, allocate space for a pointer to a
847   // precise source location of the checked return statement.
848   if (requiresReturnValueCheck()) {
849     ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
850     InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
851   }
852 
853   // Emit subprogram debug descriptor.
854   if (CGDebugInfo *DI = getDebugInfo()) {
855     // Reconstruct the type from the argument list so that implicit parameters,
856     // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
857     // convention.
858     CallingConv CC = CallingConv::CC_C;
859     if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
860       if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
861         CC = SrcFnTy->getCallConv();
862     SmallVector<QualType, 16> ArgTypes;
863     for (const VarDecl *VD : Args)
864       ArgTypes.push_back(VD->getType());
865     QualType FnType = getContext().getFunctionType(
866         RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
867     DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
868                           Builder);
869   }
870 
871   if (ShouldInstrumentFunction()) {
872     if (CGM.getCodeGenOpts().InstrumentFunctions)
873       CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
874     if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
875       CurFn->addFnAttr("instrument-function-entry-inlined",
876                        "__cyg_profile_func_enter");
877     if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
878       CurFn->addFnAttr("instrument-function-entry-inlined",
879                        "__cyg_profile_func_enter_bare");
880   }
881 
882   // Since emitting the mcount call here impacts optimizations such as function
883   // inlining, we just add an attribute to insert a mcount call in backend.
884   // The attribute "counting-function" is set to mcount function name which is
885   // architecture dependent.
886   if (CGM.getCodeGenOpts().InstrumentForProfiling) {
887     // Calls to fentry/mcount should not be generated if function has
888     // the no_instrument_function attribute.
889     if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
890       if (CGM.getCodeGenOpts().CallFEntry)
891         Fn->addFnAttr("fentry-call", "true");
892       else {
893         Fn->addFnAttr("instrument-function-entry-inlined",
894                       getTarget().getMCountName());
895       }
896       if (CGM.getCodeGenOpts().MNopMCount) {
897         if (getContext().getTargetInfo().getTriple().getArch() !=
898             llvm::Triple::systemz)
899           CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
900             << "-mnop-mcount";
901         if (!CGM.getCodeGenOpts().CallFEntry)
902           CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
903             << "-mnop-mcount" << "-mfentry";
904         Fn->addFnAttr("mnop-mcount", "true");
905       }
906     }
907   }
908 
909   if (RetTy->isVoidType()) {
910     // Void type; nothing to return.
911     ReturnValue = Address::invalid();
912 
913     // Count the implicit return.
914     if (!endsWithReturn(D))
915       ++NumReturnExprs;
916   } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
917     // Indirect return; emit returned value directly into sret slot.
918     // This reduces code size, and affects correctness in C++.
919     auto AI = CurFn->arg_begin();
920     if (CurFnInfo->getReturnInfo().isSRetAfterThis())
921       ++AI;
922     ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
923     if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
924       ReturnValuePointer =
925           CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
926       Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
927                               ReturnValue.getPointer(), Int8PtrTy),
928                           ReturnValuePointer);
929     }
930   } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
931              !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
932     // Load the sret pointer from the argument struct and return into that.
933     unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
934     llvm::Function::arg_iterator EI = CurFn->arg_end();
935     --EI;
936     llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
937     ReturnValuePointer = Address(Addr, getPointerAlign());
938     Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
939     ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
940   } else {
941     ReturnValue = CreateIRTemp(RetTy, "retval");
942 
943     // Tell the epilog emitter to autorelease the result.  We do this
944     // now so that various specialized functions can suppress it
945     // during their IR-generation.
946     if (getLangOpts().ObjCAutoRefCount &&
947         !CurFnInfo->isReturnsRetained() &&
948         RetTy->isObjCRetainableType())
949       AutoreleaseResult = true;
950   }
951 
952   EmitStartEHSpec(CurCodeDecl);
953 
954   PrologueCleanupDepth = EHStack.stable_begin();
955 
956   // Emit OpenMP specific initialization of the device functions.
957   if (getLangOpts().OpenMP && CurCodeDecl)
958     CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
959 
960   EmitFunctionProlog(*CurFnInfo, CurFn, Args);
961 
962   if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
963     CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
964     const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
965     if (MD->getParent()->isLambda() &&
966         MD->getOverloadedOperator() == OO_Call) {
967       // We're in a lambda; figure out the captures.
968       MD->getParent()->getCaptureFields(LambdaCaptureFields,
969                                         LambdaThisCaptureField);
970       if (LambdaThisCaptureField) {
971         // If the lambda captures the object referred to by '*this' - either by
972         // value or by reference, make sure CXXThisValue points to the correct
973         // object.
974 
975         // Get the lvalue for the field (which is a copy of the enclosing object
976         // or contains the address of the enclosing object).
977         LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
978         if (!LambdaThisCaptureField->getType()->isPointerType()) {
979           // If the enclosing object was captured by value, just use its address.
980           CXXThisValue = ThisFieldLValue.getAddress().getPointer();
981         } else {
982           // Load the lvalue pointed to by the field, since '*this' was captured
983           // by reference.
984           CXXThisValue =
985               EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
986         }
987       }
988       for (auto *FD : MD->getParent()->fields()) {
989         if (FD->hasCapturedVLAType()) {
990           auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
991                                            SourceLocation()).getScalarVal();
992           auto VAT = FD->getCapturedVLAType();
993           VLASizeMap[VAT->getSizeExpr()] = ExprArg;
994         }
995       }
996     } else {
997       // Not in a lambda; just use 'this' from the method.
998       // FIXME: Should we generate a new load for each use of 'this'?  The
999       // fast register allocator would be happier...
1000       CXXThisValue = CXXABIThisValue;
1001     }
1002 
1003     // Check the 'this' pointer once per function, if it's available.
1004     if (CXXABIThisValue) {
1005       SanitizerSet SkippedChecks;
1006       SkippedChecks.set(SanitizerKind::ObjectSize, true);
1007       QualType ThisTy = MD->getThisType();
1008 
1009       // If this is the call operator of a lambda with no capture-default, it
1010       // may have a static invoker function, which may call this operator with
1011       // a null 'this' pointer.
1012       if (isLambdaCallOperator(MD) &&
1013           MD->getParent()->getLambdaCaptureDefault() == LCD_None)
1014         SkippedChecks.set(SanitizerKind::Null, true);
1015 
1016       EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
1017                                                 : TCK_MemberCall,
1018                     Loc, CXXABIThisValue, ThisTy,
1019                     getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
1020                     SkippedChecks);
1021     }
1022   }
1023 
1024   // If any of the arguments have a variably modified type, make sure to
1025   // emit the type size.
1026   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1027        i != e; ++i) {
1028     const VarDecl *VD = *i;
1029 
1030     // Dig out the type as written from ParmVarDecls; it's unclear whether
1031     // the standard (C99 6.9.1p10) requires this, but we're following the
1032     // precedent set by gcc.
1033     QualType Ty;
1034     if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1035       Ty = PVD->getOriginalType();
1036     else
1037       Ty = VD->getType();
1038 
1039     if (Ty->isVariablyModifiedType())
1040       EmitVariablyModifiedType(Ty);
1041   }
1042   // Emit a location at the end of the prologue.
1043   if (CGDebugInfo *DI = getDebugInfo())
1044     DI->EmitLocation(Builder, StartLoc);
1045 
1046   // TODO: Do we need to handle this in two places like we do with
1047   // target-features/target-cpu?
1048   if (CurFuncDecl)
1049     if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1050       LargestVectorWidth = VecWidth->getVectorWidth();
1051 }
1052 
1053 void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
1054   incrementProfileCounter(Body);
1055   if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
1056     EmitCompoundStmtWithoutScope(*S);
1057   else
1058     EmitStmt(Body);
1059 }
1060 
1061 /// When instrumenting to collect profile data, the counts for some blocks
1062 /// such as switch cases need to not include the fall-through counts, so
1063 /// emit a branch around the instrumentation code. When not instrumenting,
1064 /// this just calls EmitBlock().
1065 void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
1066                                                const Stmt *S) {
1067   llvm::BasicBlock *SkipCountBB = nullptr;
1068   if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
1069     // When instrumenting for profiling, the fallthrough to certain
1070     // statements needs to skip over the instrumentation code so that we
1071     // get an accurate count.
1072     SkipCountBB = createBasicBlock("skipcount");
1073     EmitBranch(SkipCountBB);
1074   }
1075   EmitBlock(BB);
1076   uint64_t CurrentCount = getCurrentProfileCount();
1077   incrementProfileCounter(S);
1078   setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
1079   if (SkipCountBB)
1080     EmitBlock(SkipCountBB);
1081 }
1082 
1083 /// Tries to mark the given function nounwind based on the
1084 /// non-existence of any throwing calls within it.  We believe this is
1085 /// lightweight enough to do at -O0.
1086 static void TryMarkNoThrow(llvm::Function *F) {
1087   // LLVM treats 'nounwind' on a function as part of the type, so we
1088   // can't do this on functions that can be overwritten.
1089   if (F->isInterposable()) return;
1090 
1091   for (llvm::BasicBlock &BB : *F)
1092     for (llvm::Instruction &I : BB)
1093       if (I.mayThrow())
1094         return;
1095 
1096   F->setDoesNotThrow();
1097 }
1098 
1099 QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1100                                                FunctionArgList &Args) {
1101   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1102   QualType ResTy = FD->getReturnType();
1103 
1104   const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1105   if (MD && MD->isInstance()) {
1106     if (CGM.getCXXABI().HasThisReturn(GD))
1107       ResTy = MD->getThisType();
1108     else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1109       ResTy = CGM.getContext().VoidPtrTy;
1110     CGM.getCXXABI().buildThisParam(*this, Args);
1111   }
1112 
1113   // The base version of an inheriting constructor whose constructed base is a
1114   // virtual base is not passed any arguments (because it doesn't actually call
1115   // the inherited constructor).
1116   bool PassedParams = true;
1117   if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1118     if (auto Inherited = CD->getInheritedConstructor())
1119       PassedParams =
1120           getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1121 
1122   if (PassedParams) {
1123     for (auto *Param : FD->parameters()) {
1124       Args.push_back(Param);
1125       if (!Param->hasAttr<PassObjectSizeAttr>())
1126         continue;
1127 
1128       auto *Implicit = ImplicitParamDecl::Create(
1129           getContext(), Param->getDeclContext(), Param->getLocation(),
1130           /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
1131       SizeArguments[Param] = Implicit;
1132       Args.push_back(Implicit);
1133     }
1134   }
1135 
1136   if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
1137     CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1138 
1139   return ResTy;
1140 }
1141 
1142 static bool
1143 shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
1144                                              const ASTContext &Context) {
1145   QualType T = FD->getReturnType();
1146   // Avoid the optimization for functions that return a record type with a
1147   // trivial destructor or another trivially copyable type.
1148   if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
1149     if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1150       return !ClassDecl->hasTrivialDestructor();
1151   }
1152   return !T.isTriviallyCopyableType(Context);
1153 }
1154 
1155 void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1156                                    const CGFunctionInfo &FnInfo) {
1157   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1158   CurGD = GD;
1159 
1160   FunctionArgList Args;
1161   QualType ResTy = BuildFunctionArgList(GD, Args);
1162 
1163   // Check if we should generate debug info for this function.
1164   if (FD->hasAttr<NoDebugAttr>())
1165     DebugInfo = nullptr; // disable debug info indefinitely for this function
1166 
1167   // The function might not have a body if we're generating thunks for a
1168   // function declaration.
1169   SourceRange BodyRange;
1170   if (Stmt *Body = FD->getBody())
1171     BodyRange = Body->getSourceRange();
1172   else
1173     BodyRange = FD->getLocation();
1174   CurEHLocation = BodyRange.getEnd();
1175 
1176   // Use the location of the start of the function to determine where
1177   // the function definition is located. By default use the location
1178   // of the declaration as the location for the subprogram. A function
1179   // may lack a declaration in the source code if it is created by code
1180   // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1181   SourceLocation Loc = FD->getLocation();
1182 
1183   // If this is a function specialization then use the pattern body
1184   // as the location for the function.
1185   if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1186     if (SpecDecl->hasBody(SpecDecl))
1187       Loc = SpecDecl->getLocation();
1188 
1189   Stmt *Body = FD->getBody();
1190 
1191   // Initialize helper which will detect jumps which can cause invalid lifetime
1192   // markers.
1193   if (Body && ShouldEmitLifetimeMarkers)
1194     Bypasses.Init(Body);
1195 
1196   // Emit the standard function prologue.
1197   StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1198 
1199   // Generate the body of the function.
1200   PGO.assignRegionCounters(GD, CurFn);
1201   if (isa<CXXDestructorDecl>(FD))
1202     EmitDestructorBody(Args);
1203   else if (isa<CXXConstructorDecl>(FD))
1204     EmitConstructorBody(Args);
1205   else if (getLangOpts().CUDA &&
1206            !getLangOpts().CUDAIsDevice &&
1207            FD->hasAttr<CUDAGlobalAttr>())
1208     CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1209   else if (isa<CXXMethodDecl>(FD) &&
1210            cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1211     // The lambda static invoker function is special, because it forwards or
1212     // clones the body of the function call operator (but is actually static).
1213     EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1214   } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1215              (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1216               cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1217     // Implicit copy-assignment gets the same special treatment as implicit
1218     // copy-constructors.
1219     emitImplicitAssignmentOperatorBody(Args);
1220   } else if (Body) {
1221     EmitFunctionBody(Body);
1222   } else
1223     llvm_unreachable("no definition for emitted function");
1224 
1225   // C++11 [stmt.return]p2:
1226   //   Flowing off the end of a function [...] results in undefined behavior in
1227   //   a value-returning function.
1228   // C11 6.9.1p12:
1229   //   If the '}' that terminates a function is reached, and the value of the
1230   //   function call is used by the caller, the behavior is undefined.
1231   if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1232       !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1233     bool ShouldEmitUnreachable =
1234         CGM.getCodeGenOpts().StrictReturn ||
1235         shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
1236     if (SanOpts.has(SanitizerKind::Return)) {
1237       SanitizerScope SanScope(this);
1238       llvm::Value *IsFalse = Builder.getFalse();
1239       EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
1240                 SanitizerHandler::MissingReturn,
1241                 EmitCheckSourceLocation(FD->getLocation()), None);
1242     } else if (ShouldEmitUnreachable) {
1243       if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1244         EmitTrapCall(llvm::Intrinsic::trap);
1245     }
1246     if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1247       Builder.CreateUnreachable();
1248       Builder.ClearInsertionPoint();
1249     }
1250   }
1251 
1252   // Emit the standard function epilogue.
1253   FinishFunction(BodyRange.getEnd());
1254 
1255   // If we haven't marked the function nothrow through other means, do
1256   // a quick pass now to see if we can.
1257   if (!CurFn->doesNotThrow())
1258     TryMarkNoThrow(CurFn);
1259 }
1260 
1261 /// ContainsLabel - Return true if the statement contains a label in it.  If
1262 /// this statement is not executed normally, it not containing a label means
1263 /// that we can just remove the code.
1264 bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1265   // Null statement, not a label!
1266   if (!S) return false;
1267 
1268   // If this is a label, we have to emit the code, consider something like:
1269   // if (0) {  ...  foo:  bar(); }  goto foo;
1270   //
1271   // TODO: If anyone cared, we could track __label__'s, since we know that you
1272   // can't jump to one from outside their declared region.
1273   if (isa<LabelStmt>(S))
1274     return true;
1275 
1276   // If this is a case/default statement, and we haven't seen a switch, we have
1277   // to emit the code.
1278   if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1279     return true;
1280 
1281   // If this is a switch statement, we want to ignore cases below it.
1282   if (isa<SwitchStmt>(S))
1283     IgnoreCaseStmts = true;
1284 
1285   // Scan subexpressions for verboten labels.
1286   for (const Stmt *SubStmt : S->children())
1287     if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1288       return true;
1289 
1290   return false;
1291 }
1292 
1293 /// containsBreak - Return true if the statement contains a break out of it.
1294 /// If the statement (recursively) contains a switch or loop with a break
1295 /// inside of it, this is fine.
1296 bool CodeGenFunction::containsBreak(const Stmt *S) {
1297   // Null statement, not a label!
1298   if (!S) return false;
1299 
1300   // If this is a switch or loop that defines its own break scope, then we can
1301   // include it and anything inside of it.
1302   if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1303       isa<ForStmt>(S))
1304     return false;
1305 
1306   if (isa<BreakStmt>(S))
1307     return true;
1308 
1309   // Scan subexpressions for verboten breaks.
1310   for (const Stmt *SubStmt : S->children())
1311     if (containsBreak(SubStmt))
1312       return true;
1313 
1314   return false;
1315 }
1316 
1317 bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1318   if (!S) return false;
1319 
1320   // Some statement kinds add a scope and thus never add a decl to the current
1321   // scope. Note, this list is longer than the list of statements that might
1322   // have an unscoped decl nested within them, but this way is conservatively
1323   // correct even if more statement kinds are added.
1324   if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1325       isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1326       isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1327       isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1328     return false;
1329 
1330   if (isa<DeclStmt>(S))
1331     return true;
1332 
1333   for (const Stmt *SubStmt : S->children())
1334     if (mightAddDeclToScope(SubStmt))
1335       return true;
1336 
1337   return false;
1338 }
1339 
1340 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1341 /// to a constant, or if it does but contains a label, return false.  If it
1342 /// constant folds return true and set the boolean result in Result.
1343 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1344                                                    bool &ResultBool,
1345                                                    bool AllowLabels) {
1346   llvm::APSInt ResultInt;
1347   if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1348     return false;
1349 
1350   ResultBool = ResultInt.getBoolValue();
1351   return true;
1352 }
1353 
1354 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1355 /// to a constant, or if it does but contains a label, return false.  If it
1356 /// constant folds return true and set the folded value.
1357 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1358                                                    llvm::APSInt &ResultInt,
1359                                                    bool AllowLabels) {
1360   // FIXME: Rename and handle conversion of other evaluatable things
1361   // to bool.
1362   Expr::EvalResult Result;
1363   if (!Cond->EvaluateAsInt(Result, getContext()))
1364     return false;  // Not foldable, not integer or not fully evaluatable.
1365 
1366   llvm::APSInt Int = Result.Val.getInt();
1367   if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1368     return false;  // Contains a label.
1369 
1370   ResultInt = Int;
1371   return true;
1372 }
1373 
1374 
1375 
1376 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1377 /// statement) to the specified blocks.  Based on the condition, this might try
1378 /// to simplify the codegen of the conditional based on the branch.
1379 ///
1380 void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
1381                                            llvm::BasicBlock *TrueBlock,
1382                                            llvm::BasicBlock *FalseBlock,
1383                                            uint64_t TrueCount) {
1384   Cond = Cond->IgnoreParens();
1385 
1386   if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1387 
1388     // Handle X && Y in a condition.
1389     if (CondBOp->getOpcode() == BO_LAnd) {
1390       // If we have "1 && X", simplify the code.  "0 && X" would have constant
1391       // folded if the case was simple enough.
1392       bool ConstantBool = false;
1393       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1394           ConstantBool) {
1395         // br(1 && X) -> br(X).
1396         incrementProfileCounter(CondBOp);
1397         return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1398                                     TrueCount);
1399       }
1400 
1401       // If we have "X && 1", simplify the code to use an uncond branch.
1402       // "X && 0" would have been constant folded to 0.
1403       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1404           ConstantBool) {
1405         // br(X && 1) -> br(X).
1406         return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1407                                     TrueCount);
1408       }
1409 
1410       // Emit the LHS as a conditional.  If the LHS conditional is false, we
1411       // want to jump to the FalseBlock.
1412       llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1413       // The counter tells us how often we evaluate RHS, and all of TrueCount
1414       // can be propagated to that branch.
1415       uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1416 
1417       ConditionalEvaluation eval(*this);
1418       {
1419         ApplyDebugLocation DL(*this, Cond);
1420         EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
1421         EmitBlock(LHSTrue);
1422       }
1423 
1424       incrementProfileCounter(CondBOp);
1425       setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1426 
1427       // Any temporaries created here are conditional.
1428       eval.begin(*this);
1429       EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
1430       eval.end(*this);
1431 
1432       return;
1433     }
1434 
1435     if (CondBOp->getOpcode() == BO_LOr) {
1436       // If we have "0 || X", simplify the code.  "1 || X" would have constant
1437       // folded if the case was simple enough.
1438       bool ConstantBool = false;
1439       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1440           !ConstantBool) {
1441         // br(0 || X) -> br(X).
1442         incrementProfileCounter(CondBOp);
1443         return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1444                                     TrueCount);
1445       }
1446 
1447       // If we have "X || 0", simplify the code to use an uncond branch.
1448       // "X || 1" would have been constant folded to 1.
1449       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1450           !ConstantBool) {
1451         // br(X || 0) -> br(X).
1452         return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1453                                     TrueCount);
1454       }
1455 
1456       // Emit the LHS as a conditional.  If the LHS conditional is true, we
1457       // want to jump to the TrueBlock.
1458       llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1459       // We have the count for entry to the RHS and for the whole expression
1460       // being true, so we can divy up True count between the short circuit and
1461       // the RHS.
1462       uint64_t LHSCount =
1463           getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1464       uint64_t RHSCount = TrueCount - LHSCount;
1465 
1466       ConditionalEvaluation eval(*this);
1467       {
1468         ApplyDebugLocation DL(*this, Cond);
1469         EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
1470         EmitBlock(LHSFalse);
1471       }
1472 
1473       incrementProfileCounter(CondBOp);
1474       setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1475 
1476       // Any temporaries created here are conditional.
1477       eval.begin(*this);
1478       EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
1479 
1480       eval.end(*this);
1481 
1482       return;
1483     }
1484   }
1485 
1486   if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1487     // br(!x, t, f) -> br(x, f, t)
1488     if (CondUOp->getOpcode() == UO_LNot) {
1489       // Negate the count.
1490       uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1491       // Negate the condition and swap the destination blocks.
1492       return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1493                                   FalseCount);
1494     }
1495   }
1496 
1497   if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1498     // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1499     llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1500     llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1501 
1502     ConditionalEvaluation cond(*this);
1503     EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1504                          getProfileCount(CondOp));
1505 
1506     // When computing PGO branch weights, we only know the overall count for
1507     // the true block. This code is essentially doing tail duplication of the
1508     // naive code-gen, introducing new edges for which counts are not
1509     // available. Divide the counts proportionally between the LHS and RHS of
1510     // the conditional operator.
1511     uint64_t LHSScaledTrueCount = 0;
1512     if (TrueCount) {
1513       double LHSRatio =
1514           getProfileCount(CondOp) / (double)getCurrentProfileCount();
1515       LHSScaledTrueCount = TrueCount * LHSRatio;
1516     }
1517 
1518     cond.begin(*this);
1519     EmitBlock(LHSBlock);
1520     incrementProfileCounter(CondOp);
1521     {
1522       ApplyDebugLocation DL(*this, Cond);
1523       EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1524                            LHSScaledTrueCount);
1525     }
1526     cond.end(*this);
1527 
1528     cond.begin(*this);
1529     EmitBlock(RHSBlock);
1530     EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1531                          TrueCount - LHSScaledTrueCount);
1532     cond.end(*this);
1533 
1534     return;
1535   }
1536 
1537   if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1538     // Conditional operator handling can give us a throw expression as a
1539     // condition for a case like:
1540     //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
1541     // Fold this to:
1542     //   br(c, throw x, br(y, t, f))
1543     EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1544     return;
1545   }
1546 
1547   // If the branch has a condition wrapped by __builtin_unpredictable,
1548   // create metadata that specifies that the branch is unpredictable.
1549   // Don't bother if not optimizing because that metadata would not be used.
1550   llvm::MDNode *Unpredictable = nullptr;
1551   auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1552   if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1553     auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1554     if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1555       llvm::MDBuilder MDHelper(getLLVMContext());
1556       Unpredictable = MDHelper.createUnpredictable();
1557     }
1558   }
1559 
1560   // Create branch weights based on the number of times we get here and the
1561   // number of times the condition should be true.
1562   uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1563   llvm::MDNode *Weights =
1564       createProfileWeights(TrueCount, CurrentCount - TrueCount);
1565 
1566   // Emit the code with the fully general case.
1567   llvm::Value *CondV;
1568   {
1569     ApplyDebugLocation DL(*this, Cond);
1570     CondV = EvaluateExprAsBool(Cond);
1571   }
1572   Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1573 }
1574 
1575 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1576 /// specified stmt yet.
1577 void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1578   CGM.ErrorUnsupported(S, Type);
1579 }
1580 
1581 /// emitNonZeroVLAInit - Emit the "zero" initialization of a
1582 /// variable-length array whose elements have a non-zero bit-pattern.
1583 ///
1584 /// \param baseType the inner-most element type of the array
1585 /// \param src - a char* pointing to the bit-pattern for a single
1586 /// base element of the array
1587 /// \param sizeInChars - the total size of the VLA, in chars
1588 static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
1589                                Address dest, Address src,
1590                                llvm::Value *sizeInChars) {
1591   CGBuilderTy &Builder = CGF.Builder;
1592 
1593   CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
1594   llvm::Value *baseSizeInChars
1595     = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
1596 
1597   Address begin =
1598     Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
1599   llvm::Value *end =
1600     Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");
1601 
1602   llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
1603   llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
1604   llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
1605 
1606   // Make a loop over the VLA.  C99 guarantees that the VLA element
1607   // count must be nonzero.
1608   CGF.EmitBlock(loopBB);
1609 
1610   llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
1611   cur->addIncoming(begin.getPointer(), originBB);
1612 
1613   CharUnits curAlign =
1614     dest.getAlignment().alignmentOfArrayElement(baseSize);
1615 
1616   // memcpy the individual element bit-pattern.
1617   Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
1618                        /*volatile*/ false);
1619 
1620   // Go to the next element.
1621   llvm::Value *next =
1622     Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
1623 
1624   // Leave if that's the end of the VLA.
1625   llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
1626   Builder.CreateCondBr(done, contBB, loopBB);
1627   cur->addIncoming(next, loopBB);
1628 
1629   CGF.EmitBlock(contBB);
1630 }
1631 
1632 void
1633 CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
1634   // Ignore empty classes in C++.
1635   if (getLangOpts().CPlusPlus) {
1636     if (const RecordType *RT = Ty->getAs<RecordType>()) {
1637       if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
1638         return;
1639     }
1640   }
1641 
1642   // Cast the dest ptr to the appropriate i8 pointer type.
1643   if (DestPtr.getElementType() != Int8Ty)
1644     DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1645 
1646   // Get size and alignment info for this aggregate.
1647   CharUnits size = getContext().getTypeSizeInChars(Ty);
1648 
1649   llvm::Value *SizeVal;
1650   const VariableArrayType *vla;
1651 
1652   // Don't bother emitting a zero-byte memset.
1653   if (size.isZero()) {
1654     // But note that getTypeInfo returns 0 for a VLA.
1655     if (const VariableArrayType *vlaType =
1656           dyn_cast_or_null<VariableArrayType>(
1657                                           getContext().getAsArrayType(Ty))) {
1658       auto VlaSize = getVLASize(vlaType);
1659       SizeVal = VlaSize.NumElts;
1660       CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1661       if (!eltSize.isOne())
1662         SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
1663       vla = vlaType;
1664     } else {
1665       return;
1666     }
1667   } else {
1668     SizeVal = CGM.getSize(size);
1669     vla = nullptr;
1670   }
1671 
1672   // If the type contains a pointer to data member we can't memset it to zero.
1673   // Instead, create a null constant and copy it to the destination.
1674   // TODO: there are other patterns besides zero that we can usefully memset,
1675   // like -1, which happens to be the pattern used by member-pointers.
1676   if (!CGM.getTypes().isZeroInitializable(Ty)) {
1677     // For a VLA, emit a single element, then splat that over the VLA.
1678     if (vla) Ty = getContext().getBaseElementType(vla);
1679 
1680     llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
1681 
1682     llvm::GlobalVariable *NullVariable =
1683       new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
1684                                /*isConstant=*/true,
1685                                llvm::GlobalVariable::PrivateLinkage,
1686                                NullConstant, Twine());
1687     CharUnits NullAlign = DestPtr.getAlignment();
1688     NullVariable->setAlignment(NullAlign.getAsAlign());
1689     Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
1690                    NullAlign);
1691 
1692     if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
1693 
1694     // Get and call the appropriate llvm.memcpy overload.
1695     Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
1696     return;
1697   }
1698 
1699   // Otherwise, just memset the whole thing to zero.  This is legal
1700   // because in LLVM, all default initializers (other than the ones we just
1701   // handled above) are guaranteed to have a bit pattern of all zeros.
1702   Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
1703 }
1704 
1705 llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
1706   // Make sure that there is a block for the indirect goto.
1707   if (!IndirectBranch)
1708     GetIndirectGotoBlock();
1709 
1710   llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
1711 
1712   // Make sure the indirect branch includes all of the address-taken blocks.
1713   IndirectBranch->addDestination(BB);
1714   return llvm::BlockAddress::get(CurFn, BB);
1715 }
1716 
1717 llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
1718   // If we already made the indirect branch for indirect goto, return its block.
1719   if (IndirectBranch) return IndirectBranch->getParent();
1720 
1721   CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
1722 
1723   // Create the PHI node that indirect gotos will add entries to.
1724   llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
1725                                               "indirect.goto.dest");
1726 
1727   // Create the indirect branch instruction.
1728   IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
1729   return IndirectBranch->getParent();
1730 }
1731 
1732 /// Computes the length of an array in elements, as well as the base
1733 /// element type and a properly-typed first element pointer.
1734 llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
1735                                               QualType &baseType,
1736                                               Address &addr) {
1737   const ArrayType *arrayType = origArrayType;
1738 
1739   // If it's a VLA, we have to load the stored size.  Note that
1740   // this is the size of the VLA in bytes, not its size in elements.
1741   llvm::Value *numVLAElements = nullptr;
1742   if (isa<VariableArrayType>(arrayType)) {
1743     numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
1744 
1745     // Walk into all VLAs.  This doesn't require changes to addr,
1746     // which has type T* where T is the first non-VLA element type.
1747     do {
1748       QualType elementType = arrayType->getElementType();
1749       arrayType = getContext().getAsArrayType(elementType);
1750 
1751       // If we only have VLA components, 'addr' requires no adjustment.
1752       if (!arrayType) {
1753         baseType = elementType;
1754         return numVLAElements;
1755       }
1756     } while (isa<VariableArrayType>(arrayType));
1757 
1758     // We get out here only if we find a constant array type
1759     // inside the VLA.
1760   }
1761 
1762   // We have some number of constant-length arrays, so addr should
1763   // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
1764   // down to the first element of addr.
1765   SmallVector<llvm::Value*, 8> gepIndices;
1766 
1767   // GEP down to the array type.
1768   llvm::ConstantInt *zero = Builder.getInt32(0);
1769   gepIndices.push_back(zero);
1770 
1771   uint64_t countFromCLAs = 1;
1772   QualType eltType;
1773 
1774   llvm::ArrayType *llvmArrayType =
1775     dyn_cast<llvm::ArrayType>(addr.getElementType());
1776   while (llvmArrayType) {
1777     assert(isa<ConstantArrayType>(arrayType));
1778     assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
1779              == llvmArrayType->getNumElements());
1780 
1781     gepIndices.push_back(zero);
1782     countFromCLAs *= llvmArrayType->getNumElements();
1783     eltType = arrayType->getElementType();
1784 
1785     llvmArrayType =
1786       dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
1787     arrayType = getContext().getAsArrayType(arrayType->getElementType());
1788     assert((!llvmArrayType || arrayType) &&
1789            "LLVM and Clang types are out-of-synch");
1790   }
1791 
1792   if (arrayType) {
1793     // From this point onwards, the Clang array type has been emitted
1794     // as some other type (probably a packed struct). Compute the array
1795     // size, and just emit the 'begin' expression as a bitcast.
1796     while (arrayType) {
1797       countFromCLAs *=
1798           cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
1799       eltType = arrayType->getElementType();
1800       arrayType = getContext().getAsArrayType(eltType);
1801     }
1802 
1803     llvm::Type *baseType = ConvertType(eltType);
1804     addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
1805   } else {
1806     // Create the actual GEP.
1807     addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
1808                                              gepIndices, "array.begin"),
1809                    addr.getAlignment());
1810   }
1811 
1812   baseType = eltType;
1813 
1814   llvm::Value *numElements
1815     = llvm::ConstantInt::get(SizeTy, countFromCLAs);
1816 
1817   // If we had any VLA dimensions, factor them in.
1818   if (numVLAElements)
1819     numElements = Builder.CreateNUWMul(numVLAElements, numElements);
1820 
1821   return numElements;
1822 }
1823 
1824 CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
1825   const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1826   assert(vla && "type was not a variable array type!");
1827   return getVLASize(vla);
1828 }
1829 
1830 CodeGenFunction::VlaSizePair
1831 CodeGenFunction::getVLASize(const VariableArrayType *type) {
1832   // The number of elements so far; always size_t.
1833   llvm::Value *numElements = nullptr;
1834 
1835   QualType elementType;
1836   do {
1837     elementType = type->getElementType();
1838     llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
1839     assert(vlaSize && "no size for VLA!");
1840     assert(vlaSize->getType() == SizeTy);
1841 
1842     if (!numElements) {
1843       numElements = vlaSize;
1844     } else {
1845       // It's undefined behavior if this wraps around, so mark it that way.
1846       // FIXME: Teach -fsanitize=undefined to trap this.
1847       numElements = Builder.CreateNUWMul(numElements, vlaSize);
1848     }
1849   } while ((type = getContext().getAsVariableArrayType(elementType)));
1850 
1851   return { numElements, elementType };
1852 }
1853 
1854 CodeGenFunction::VlaSizePair
1855 CodeGenFunction::getVLAElements1D(QualType type) {
1856   const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1857   assert(vla && "type was not a variable array type!");
1858   return getVLAElements1D(vla);
1859 }
1860 
1861 CodeGenFunction::VlaSizePair
1862 CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
1863   llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
1864   assert(VlaSize && "no size for VLA!");
1865   assert(VlaSize->getType() == SizeTy);
1866   return { VlaSize, Vla->getElementType() };
1867 }
1868 
1869 void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
1870   assert(type->isVariablyModifiedType() &&
1871          "Must pass variably modified type to EmitVLASizes!");
1872 
1873   EnsureInsertPoint();
1874 
1875   // We're going to walk down into the type and look for VLA
1876   // expressions.
1877   do {
1878     assert(type->isVariablyModifiedType());
1879 
1880     const Type *ty = type.getTypePtr();
1881     switch (ty->getTypeClass()) {
1882 
1883 #define TYPE(Class, Base)
1884 #define ABSTRACT_TYPE(Class, Base)
1885 #define NON_CANONICAL_TYPE(Class, Base)
1886 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
1887 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
1888 #include "clang/AST/TypeNodes.inc"
1889       llvm_unreachable("unexpected dependent type!");
1890 
1891     // These types are never variably-modified.
1892     case Type::Builtin:
1893     case Type::Complex:
1894     case Type::Vector:
1895     case Type::ExtVector:
1896     case Type::Record:
1897     case Type::Enum:
1898     case Type::Elaborated:
1899     case Type::TemplateSpecialization:
1900     case Type::ObjCTypeParam:
1901     case Type::ObjCObject:
1902     case Type::ObjCInterface:
1903     case Type::ObjCObjectPointer:
1904       llvm_unreachable("type class is never variably-modified!");
1905 
1906     case Type::Adjusted:
1907       type = cast<AdjustedType>(ty)->getAdjustedType();
1908       break;
1909 
1910     case Type::Decayed:
1911       type = cast<DecayedType>(ty)->getPointeeType();
1912       break;
1913 
1914     case Type::Pointer:
1915       type = cast<PointerType>(ty)->getPointeeType();
1916       break;
1917 
1918     case Type::BlockPointer:
1919       type = cast<BlockPointerType>(ty)->getPointeeType();
1920       break;
1921 
1922     case Type::LValueReference:
1923     case Type::RValueReference:
1924       type = cast<ReferenceType>(ty)->getPointeeType();
1925       break;
1926 
1927     case Type::MemberPointer:
1928       type = cast<MemberPointerType>(ty)->getPointeeType();
1929       break;
1930 
1931     case Type::ConstantArray:
1932     case Type::IncompleteArray:
1933       // Losing element qualification here is fine.
1934       type = cast<ArrayType>(ty)->getElementType();
1935       break;
1936 
1937     case Type::VariableArray: {
1938       // Losing element qualification here is fine.
1939       const VariableArrayType *vat = cast<VariableArrayType>(ty);
1940 
1941       // Unknown size indication requires no size computation.
1942       // Otherwise, evaluate and record it.
1943       if (const Expr *size = vat->getSizeExpr()) {
1944         // It's possible that we might have emitted this already,
1945         // e.g. with a typedef and a pointer to it.
1946         llvm::Value *&entry = VLASizeMap[size];
1947         if (!entry) {
1948           llvm::Value *Size = EmitScalarExpr(size);
1949 
1950           // C11 6.7.6.2p5:
1951           //   If the size is an expression that is not an integer constant
1952           //   expression [...] each time it is evaluated it shall have a value
1953           //   greater than zero.
1954           if (SanOpts.has(SanitizerKind::VLABound) &&
1955               size->getType()->isSignedIntegerType()) {
1956             SanitizerScope SanScope(this);
1957             llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
1958             llvm::Constant *StaticArgs[] = {
1959                 EmitCheckSourceLocation(size->getBeginLoc()),
1960                 EmitCheckTypeDescriptor(size->getType())};
1961             EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
1962                                      SanitizerKind::VLABound),
1963                       SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
1964           }
1965 
1966           // Always zexting here would be wrong if it weren't
1967           // undefined behavior to have a negative bound.
1968           entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
1969         }
1970       }
1971       type = vat->getElementType();
1972       break;
1973     }
1974 
1975     case Type::FunctionProto:
1976     case Type::FunctionNoProto:
1977       type = cast<FunctionType>(ty)->getReturnType();
1978       break;
1979 
1980     case Type::Paren:
1981     case Type::TypeOf:
1982     case Type::UnaryTransform:
1983     case Type::Attributed:
1984     case Type::SubstTemplateTypeParm:
1985     case Type::PackExpansion:
1986     case Type::MacroQualified:
1987       // Keep walking after single level desugaring.
1988       type = type.getSingleStepDesugaredType(getContext());
1989       break;
1990 
1991     case Type::Typedef:
1992     case Type::Decltype:
1993     case Type::Auto:
1994     case Type::DeducedTemplateSpecialization:
1995       // Stop walking: nothing to do.
1996       return;
1997 
1998     case Type::TypeOfExpr:
1999       // Stop walking: emit typeof expression.
2000       EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2001       return;
2002 
2003     case Type::Atomic:
2004       type = cast<AtomicType>(ty)->getValueType();
2005       break;
2006 
2007     case Type::Pipe:
2008       type = cast<PipeType>(ty)->getElementType();
2009       break;
2010     }
2011   } while (type->isVariablyModifiedType());
2012 }
2013 
2014 Address CodeGenFunction::EmitVAListRef(const Expr* E) {
2015   if (getContext().getBuiltinVaListType()->isArrayType())
2016     return EmitPointerWithAlignment(E);
2017   return EmitLValue(E).getAddress();
2018 }
2019 
2020 Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2021   return EmitLValue(E).getAddress();
2022 }
2023 
2024 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2025                                               const APValue &Init) {
2026   assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2027   if (CGDebugInfo *Dbg = getDebugInfo())
2028     if (CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
2029       Dbg->EmitGlobalVariable(E->getDecl(), Init);
2030 }
2031 
2032 CodeGenFunction::PeepholeProtection
2033 CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2034   // At the moment, the only aggressive peephole we do in IR gen
2035   // is trunc(zext) folding, but if we add more, we can easily
2036   // extend this protection.
2037 
2038   if (!rvalue.isScalar()) return PeepholeProtection();
2039   llvm::Value *value = rvalue.getScalarVal();
2040   if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2041 
2042   // Just make an extra bitcast.
2043   assert(HaveInsertPoint());
2044   llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2045                                                   Builder.GetInsertBlock());
2046 
2047   PeepholeProtection protection;
2048   protection.Inst = inst;
2049   return protection;
2050 }
2051 
2052 void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2053   if (!protection.Inst) return;
2054 
2055   // In theory, we could try to duplicate the peepholes now, but whatever.
2056   protection.Inst->eraseFromParent();
2057 }
2058 
2059 void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
2060                                               QualType Ty, SourceLocation Loc,
2061                                               SourceLocation AssumptionLoc,
2062                                               llvm::Value *Alignment,
2063                                               llvm::Value *OffsetValue) {
2064   llvm::Value *TheCheck;
2065   llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2066       CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
2067   if (SanOpts.has(SanitizerKind::Alignment)) {
2068     EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2069                                  OffsetValue, TheCheck, Assumption);
2070   }
2071 }
2072 
2073 void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
2074                                               const Expr *E,
2075                                               SourceLocation AssumptionLoc,
2076                                               llvm::Value *Alignment,
2077                                               llvm::Value *OffsetValue) {
2078   if (auto *CE = dyn_cast<CastExpr>(E))
2079     E = CE->getSubExprAsWritten();
2080   QualType Ty = E->getType();
2081   SourceLocation Loc = E->getExprLoc();
2082 
2083   EmitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2084                           OffsetValue);
2085 }
2086 
2087 llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2088                                                  llvm::Value *AnnotatedVal,
2089                                                  StringRef AnnotationStr,
2090                                                  SourceLocation Location) {
2091   llvm::Value *Args[4] = {
2092     AnnotatedVal,
2093     Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
2094     Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
2095     CGM.EmitAnnotationLineNo(Location)
2096   };
2097   return Builder.CreateCall(AnnotationFn, Args);
2098 }
2099 
2100 void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2101   assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2102   // FIXME We create a new bitcast for every annotation because that's what
2103   // llvm-gcc was doing.
2104   for (const auto *I : D->specific_attrs<AnnotateAttr>())
2105     EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
2106                        Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
2107                        I->getAnnotation(), D->getLocation());
2108 }
2109 
2110 Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2111                                               Address Addr) {
2112   assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2113   llvm::Value *V = Addr.getPointer();
2114   llvm::Type *VTy = V->getType();
2115   llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2116                                     CGM.Int8PtrTy);
2117 
2118   for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2119     // FIXME Always emit the cast inst so we can differentiate between
2120     // annotation on the first field of a struct and annotation on the struct
2121     // itself.
2122     if (VTy != CGM.Int8PtrTy)
2123       V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
2124     V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
2125     V = Builder.CreateBitCast(V, VTy);
2126   }
2127 
2128   return Address(V, Addr.getAlignment());
2129 }
2130 
2131 CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2132 
2133 CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2134     : CGF(CGF) {
2135   assert(!CGF->IsSanitizerScope);
2136   CGF->IsSanitizerScope = true;
2137 }
2138 
2139 CodeGenFunction::SanitizerScope::~SanitizerScope() {
2140   CGF->IsSanitizerScope = false;
2141 }
2142 
2143 void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2144                                    const llvm::Twine &Name,
2145                                    llvm::BasicBlock *BB,
2146                                    llvm::BasicBlock::iterator InsertPt) const {
2147   LoopStack.InsertHelper(I);
2148   if (IsSanitizerScope)
2149     CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
2150 }
2151 
2152 void CGBuilderInserter::InsertHelper(
2153     llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2154     llvm::BasicBlock::iterator InsertPt) const {
2155   llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2156   if (CGF)
2157     CGF->InsertHelper(I, Name, BB, InsertPt);
2158 }
2159 
2160 static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
2161                                 CodeGenModule &CGM, const FunctionDecl *FD,
2162                                 std::string &FirstMissing) {
2163   // If there aren't any required features listed then go ahead and return.
2164   if (ReqFeatures.empty())
2165     return false;
2166 
2167   // Now build up the set of caller features and verify that all the required
2168   // features are there.
2169   llvm::StringMap<bool> CallerFeatureMap;
2170   CGM.getFunctionFeatureMap(CallerFeatureMap, GlobalDecl().getWithDecl(FD));
2171 
2172   // If we have at least one of the features in the feature list return
2173   // true, otherwise return false.
2174   return std::all_of(
2175       ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
2176         SmallVector<StringRef, 1> OrFeatures;
2177         Feature.split(OrFeatures, '|');
2178         return llvm::any_of(OrFeatures, [&](StringRef Feature) {
2179           if (!CallerFeatureMap.lookup(Feature)) {
2180             FirstMissing = Feature.str();
2181             return false;
2182           }
2183           return true;
2184         });
2185       });
2186 }
2187 
2188 // Emits an error if we don't have a valid set of target features for the
2189 // called function.
2190 void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2191                                           const FunctionDecl *TargetDecl) {
2192   return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2193 }
2194 
2195 // Emits an error if we don't have a valid set of target features for the
2196 // called function.
2197 void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2198                                           const FunctionDecl *TargetDecl) {
2199   // Early exit if this is an indirect call.
2200   if (!TargetDecl)
2201     return;
2202 
2203   // Get the current enclosing function if it exists. If it doesn't
2204   // we can't check the target features anyhow.
2205   const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2206   if (!FD)
2207     return;
2208 
2209   // Grab the required features for the call. For a builtin this is listed in
2210   // the td file with the default cpu, for an always_inline function this is any
2211   // listed cpu and any listed features.
2212   unsigned BuiltinID = TargetDecl->getBuiltinID();
2213   std::string MissingFeature;
2214   if (BuiltinID) {
2215     SmallVector<StringRef, 1> ReqFeatures;
2216     const char *FeatureList =
2217         CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2218     // Return if the builtin doesn't have any required features.
2219     if (!FeatureList || StringRef(FeatureList) == "")
2220       return;
2221     StringRef(FeatureList).split(ReqFeatures, ',');
2222     if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2223       CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2224           << TargetDecl->getDeclName()
2225           << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2226 
2227   } else if (TargetDecl->hasAttr<TargetAttr>() ||
2228              TargetDecl->hasAttr<CPUSpecificAttr>()) {
2229     // Get the required features for the callee.
2230 
2231     const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2232     TargetAttr::ParsedTargetAttr ParsedAttr = CGM.filterFunctionTargetAttrs(TD);
2233 
2234     SmallVector<StringRef, 1> ReqFeatures;
2235     llvm::StringMap<bool> CalleeFeatureMap;
2236     CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2237 
2238     for (const auto &F : ParsedAttr.Features) {
2239       if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2240         ReqFeatures.push_back(StringRef(F).substr(1));
2241     }
2242 
2243     for (const auto &F : CalleeFeatureMap) {
2244       // Only positive features are "required".
2245       if (F.getValue())
2246         ReqFeatures.push_back(F.getKey());
2247     }
2248     if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2249       CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2250           << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2251   }
2252 }
2253 
2254 void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2255   if (!CGM.getCodeGenOpts().SanitizeStats)
2256     return;
2257 
2258   llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2259   IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2260   CGM.getSanStats().create(IRB, SSK);
2261 }
2262 
2263 llvm::Value *
2264 CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
2265   llvm::Value *Condition = nullptr;
2266 
2267   if (!RO.Conditions.Architecture.empty())
2268     Condition = EmitX86CpuIs(RO.Conditions.Architecture);
2269 
2270   if (!RO.Conditions.Features.empty()) {
2271     llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2272     Condition =
2273         Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2274   }
2275   return Condition;
2276 }
2277 
2278 static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2279                                              llvm::Function *Resolver,
2280                                              CGBuilderTy &Builder,
2281                                              llvm::Function *FuncToReturn,
2282                                              bool SupportsIFunc) {
2283   if (SupportsIFunc) {
2284     Builder.CreateRet(FuncToReturn);
2285     return;
2286   }
2287 
2288   llvm::SmallVector<llvm::Value *, 10> Args;
2289   llvm::for_each(Resolver->args(),
2290                  [&](llvm::Argument &Arg) { Args.push_back(&Arg); });
2291 
2292   llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2293   Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2294 
2295   if (Resolver->getReturnType()->isVoidTy())
2296     Builder.CreateRetVoid();
2297   else
2298     Builder.CreateRet(Result);
2299 }
2300 
2301 void CodeGenFunction::EmitMultiVersionResolver(
2302     llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2303   assert((getContext().getTargetInfo().getTriple().getArch() ==
2304               llvm::Triple::x86 ||
2305           getContext().getTargetInfo().getTriple().getArch() ==
2306               llvm::Triple::x86_64) &&
2307          "Only implemented for x86 targets");
2308 
2309   bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2310 
2311   // Main function's basic block.
2312   llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2313   Builder.SetInsertPoint(CurBlock);
2314   EmitX86CpuInit();
2315 
2316   for (const MultiVersionResolverOption &RO : Options) {
2317     Builder.SetInsertPoint(CurBlock);
2318     llvm::Value *Condition = FormResolverCondition(RO);
2319 
2320     // The 'default' or 'generic' case.
2321     if (!Condition) {
2322       assert(&RO == Options.end() - 1 &&
2323              "Default or Generic case must be last");
2324       CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2325                                        SupportsIFunc);
2326       return;
2327     }
2328 
2329     llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2330     CGBuilderTy RetBuilder(*this, RetBlock);
2331     CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2332                                      SupportsIFunc);
2333     CurBlock = createBasicBlock("resolver_else", Resolver);
2334     Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2335   }
2336 
2337   // If no generic/default, emit an unreachable.
2338   Builder.SetInsertPoint(CurBlock);
2339   llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2340   TrapCall->setDoesNotReturn();
2341   TrapCall->setDoesNotThrow();
2342   Builder.CreateUnreachable();
2343   Builder.ClearInsertionPoint();
2344 }
2345 
2346 // Loc - where the diagnostic will point, where in the source code this
2347 //  alignment has failed.
2348 // SecondaryLoc - if present (will be present if sufficiently different from
2349 //  Loc), the diagnostic will additionally point a "Note:" to this location.
2350 //  It should be the location where the __attribute__((assume_aligned))
2351 //  was written e.g.
2352 void CodeGenFunction::EmitAlignmentAssumptionCheck(
2353     llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2354     SourceLocation SecondaryLoc, llvm::Value *Alignment,
2355     llvm::Value *OffsetValue, llvm::Value *TheCheck,
2356     llvm::Instruction *Assumption) {
2357   assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2358          cast<llvm::CallInst>(Assumption)->getCalledValue() ==
2359              llvm::Intrinsic::getDeclaration(
2360                  Builder.GetInsertBlock()->getParent()->getParent(),
2361                  llvm::Intrinsic::assume) &&
2362          "Assumption should be a call to llvm.assume().");
2363   assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2364          "Assumption should be the last instruction of the basic block, "
2365          "since the basic block is still being generated.");
2366 
2367   if (!SanOpts.has(SanitizerKind::Alignment))
2368     return;
2369 
2370   // Don't check pointers to volatile data. The behavior here is implementation-
2371   // defined.
2372   if (Ty->getPointeeType().isVolatileQualified())
2373     return;
2374 
2375   // We need to temorairly remove the assumption so we can insert the
2376   // sanitizer check before it, else the check will be dropped by optimizations.
2377   Assumption->removeFromParent();
2378 
2379   {
2380     SanitizerScope SanScope(this);
2381 
2382     if (!OffsetValue)
2383       OffsetValue = Builder.getInt1(0); // no offset.
2384 
2385     llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2386                                     EmitCheckSourceLocation(SecondaryLoc),
2387                                     EmitCheckTypeDescriptor(Ty)};
2388     llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2389                                   EmitCheckValue(Alignment),
2390                                   EmitCheckValue(OffsetValue)};
2391     EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2392               SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2393   }
2394 
2395   // We are now in the (new, empty) "cont" basic block.
2396   // Reintroduce the assumption.
2397   Builder.Insert(Assumption);
2398   // FIXME: Assumption still has it's original basic block as it's Parent.
2399 }
2400 
2401 llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2402   if (CGDebugInfo *DI = getDebugInfo())
2403     return DI->SourceLocToDebugLoc(Location);
2404 
2405   return llvm::DebugLoc();
2406 }
2407