1 //===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCXXABI.h"
14 #include "CGCleanup.h"
15 #include "CGDebugInfo.h"
16 #include "CGObjCRuntime.h"
17 #include "CGOpenMPRuntime.h"
18 #include "CodeGenFunction.h"
19 #include "CodeGenModule.h"
20 #include "ConstantEmitter.h"
21 #include "TargetInfo.h"
22 #include "clang/AST/ASTContext.h"
23 #include "clang/AST/Attr.h"
24 #include "clang/AST/DeclObjC.h"
25 #include "clang/AST/Expr.h"
26 #include "clang/AST/RecordLayout.h"
27 #include "clang/AST/StmtVisitor.h"
28 #include "clang/Basic/CodeGenOptions.h"
29 #include "clang/Basic/TargetInfo.h"
30 #include "llvm/ADT/APFixedPoint.h"
31 #include "llvm/ADT/Optional.h"
32 #include "llvm/IR/CFG.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/FixedPointBuilder.h"
36 #include "llvm/IR/Function.h"
37 #include "llvm/IR/GetElementPtrTypeIterator.h"
38 #include "llvm/IR/GlobalVariable.h"
39 #include "llvm/IR/Intrinsics.h"
40 #include "llvm/IR/IntrinsicsPowerPC.h"
41 #include "llvm/IR/MatrixBuilder.h"
42 #include "llvm/IR/Module.h"
43 #include <cstdarg>
44 
45 using namespace clang;
46 using namespace CodeGen;
47 using llvm::Value;
48 
49 //===----------------------------------------------------------------------===//
50 //                         Scalar Expression Emitter
51 //===----------------------------------------------------------------------===//
52 
53 namespace {
54 
55 /// Determine whether the given binary operation may overflow.
56 /// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
57 /// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
58 /// the returned overflow check is precise. The returned value is 'true' for
59 /// all other opcodes, to be conservative.
60 bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
61                              BinaryOperator::Opcode Opcode, bool Signed,
62                              llvm::APInt &Result) {
63   // Assume overflow is possible, unless we can prove otherwise.
64   bool Overflow = true;
65   const auto &LHSAP = LHS->getValue();
66   const auto &RHSAP = RHS->getValue();
67   if (Opcode == BO_Add) {
68     if (Signed)
69       Result = LHSAP.sadd_ov(RHSAP, Overflow);
70     else
71       Result = LHSAP.uadd_ov(RHSAP, Overflow);
72   } else if (Opcode == BO_Sub) {
73     if (Signed)
74       Result = LHSAP.ssub_ov(RHSAP, Overflow);
75     else
76       Result = LHSAP.usub_ov(RHSAP, Overflow);
77   } else if (Opcode == BO_Mul) {
78     if (Signed)
79       Result = LHSAP.smul_ov(RHSAP, Overflow);
80     else
81       Result = LHSAP.umul_ov(RHSAP, Overflow);
82   } else if (Opcode == BO_Div || Opcode == BO_Rem) {
83     if (Signed && !RHS->isZero())
84       Result = LHSAP.sdiv_ov(RHSAP, Overflow);
85     else
86       return false;
87   }
88   return Overflow;
89 }
90 
91 struct BinOpInfo {
92   Value *LHS;
93   Value *RHS;
94   QualType Ty;  // Computation Type.
95   BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
96   FPOptions FPFeatures;
97   const Expr *E;      // Entire expr, for error unsupported.  May not be binop.
98 
99   /// Check if the binop can result in integer overflow.
100   bool mayHaveIntegerOverflow() const {
101     // Without constant input, we can't rule out overflow.
102     auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
103     auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
104     if (!LHSCI || !RHSCI)
105       return true;
106 
107     llvm::APInt Result;
108     return ::mayHaveIntegerOverflow(
109         LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
110   }
111 
112   /// Check if the binop computes a division or a remainder.
113   bool isDivremOp() const {
114     return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
115            Opcode == BO_RemAssign;
116   }
117 
118   /// Check if the binop can result in an integer division by zero.
119   bool mayHaveIntegerDivisionByZero() const {
120     if (isDivremOp())
121       if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
122         return CI->isZero();
123     return true;
124   }
125 
126   /// Check if the binop can result in a float division by zero.
127   bool mayHaveFloatDivisionByZero() const {
128     if (isDivremOp())
129       if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
130         return CFP->isZero();
131     return true;
132   }
133 
134   /// Check if at least one operand is a fixed point type. In such cases, this
135   /// operation did not follow usual arithmetic conversion and both operands
136   /// might not be of the same type.
137   bool isFixedPointOp() const {
138     // We cannot simply check the result type since comparison operations return
139     // an int.
140     if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
141       QualType LHSType = BinOp->getLHS()->getType();
142       QualType RHSType = BinOp->getRHS()->getType();
143       return LHSType->isFixedPointType() || RHSType->isFixedPointType();
144     }
145     if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
146       return UnOp->getSubExpr()->getType()->isFixedPointType();
147     return false;
148   }
149 };
150 
151 static bool MustVisitNullValue(const Expr *E) {
152   // If a null pointer expression's type is the C++0x nullptr_t, then
153   // it's not necessarily a simple constant and it must be evaluated
154   // for its potential side effects.
155   return E->getType()->isNullPtrType();
156 }
157 
158 /// If \p E is a widened promoted integer, get its base (unpromoted) type.
159 static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
160                                                         const Expr *E) {
161   const Expr *Base = E->IgnoreImpCasts();
162   if (E == Base)
163     return llvm::None;
164 
165   QualType BaseTy = Base->getType();
166   if (!BaseTy->isPromotableIntegerType() ||
167       Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
168     return llvm::None;
169 
170   return BaseTy;
171 }
172 
173 /// Check if \p E is a widened promoted integer.
174 static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
175   return getUnwidenedIntegerType(Ctx, E).hasValue();
176 }
177 
178 /// Check if we can skip the overflow check for \p Op.
179 static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
180   assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
181          "Expected a unary or binary operator");
182 
183   // If the binop has constant inputs and we can prove there is no overflow,
184   // we can elide the overflow check.
185   if (!Op.mayHaveIntegerOverflow())
186     return true;
187 
188   // If a unary op has a widened operand, the op cannot overflow.
189   if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
190     return !UO->canOverflow();
191 
192   // We usually don't need overflow checks for binops with widened operands.
193   // Multiplication with promoted unsigned operands is a special case.
194   const auto *BO = cast<BinaryOperator>(Op.E);
195   auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
196   if (!OptionalLHSTy)
197     return false;
198 
199   auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
200   if (!OptionalRHSTy)
201     return false;
202 
203   QualType LHSTy = *OptionalLHSTy;
204   QualType RHSTy = *OptionalRHSTy;
205 
206   // This is the simple case: binops without unsigned multiplication, and with
207   // widened operands. No overflow check is needed here.
208   if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
209       !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
210     return true;
211 
212   // For unsigned multiplication the overflow check can be elided if either one
213   // of the unpromoted types are less than half the size of the promoted type.
214   unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
215   return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
216          (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
217 }
218 
219 class ScalarExprEmitter
220   : public StmtVisitor<ScalarExprEmitter, Value*> {
221   CodeGenFunction &CGF;
222   CGBuilderTy &Builder;
223   bool IgnoreResultAssign;
224   llvm::LLVMContext &VMContext;
225 public:
226 
227   ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
228     : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
229       VMContext(cgf.getLLVMContext()) {
230   }
231 
232   //===--------------------------------------------------------------------===//
233   //                               Utilities
234   //===--------------------------------------------------------------------===//
235 
236   bool TestAndClearIgnoreResultAssign() {
237     bool I = IgnoreResultAssign;
238     IgnoreResultAssign = false;
239     return I;
240   }
241 
242   llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
243   LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
244   LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
245     return CGF.EmitCheckedLValue(E, TCK);
246   }
247 
248   void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
249                       const BinOpInfo &Info);
250 
251   Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
252     return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
253   }
254 
255   void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
256     const AlignValueAttr *AVAttr = nullptr;
257     if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
258       const ValueDecl *VD = DRE->getDecl();
259 
260       if (VD->getType()->isReferenceType()) {
261         if (const auto *TTy =
262             dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
263           AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
264       } else {
265         // Assumptions for function parameters are emitted at the start of the
266         // function, so there is no need to repeat that here,
267         // unless the alignment-assumption sanitizer is enabled,
268         // then we prefer the assumption over alignment attribute
269         // on IR function param.
270         if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
271           return;
272 
273         AVAttr = VD->getAttr<AlignValueAttr>();
274       }
275     }
276 
277     if (!AVAttr)
278       if (const auto *TTy =
279           dyn_cast<TypedefType>(E->getType()))
280         AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
281 
282     if (!AVAttr)
283       return;
284 
285     Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
286     llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
287     CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
288   }
289 
290   /// EmitLoadOfLValue - Given an expression with complex type that represents a
291   /// value l-value, this method emits the address of the l-value, then loads
292   /// and returns the result.
293   Value *EmitLoadOfLValue(const Expr *E) {
294     Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
295                                 E->getExprLoc());
296 
297     EmitLValueAlignmentAssumption(E, V);
298     return V;
299   }
300 
301   /// EmitConversionToBool - Convert the specified expression value to a
302   /// boolean (i1) truth value.  This is equivalent to "Val != 0".
303   Value *EmitConversionToBool(Value *Src, QualType DstTy);
304 
305   /// Emit a check that a conversion from a floating-point type does not
306   /// overflow.
307   void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
308                                 Value *Src, QualType SrcType, QualType DstType,
309                                 llvm::Type *DstTy, SourceLocation Loc);
310 
311   /// Known implicit conversion check kinds.
312   /// Keep in sync with the enum of the same name in ubsan_handlers.h
313   enum ImplicitConversionCheckKind : unsigned char {
314     ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
315     ICCK_UnsignedIntegerTruncation = 1,
316     ICCK_SignedIntegerTruncation = 2,
317     ICCK_IntegerSignChange = 3,
318     ICCK_SignedIntegerTruncationOrSignChange = 4,
319   };
320 
321   /// Emit a check that an [implicit] truncation of an integer  does not
322   /// discard any bits. It is not UB, so we use the value after truncation.
323   void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
324                                   QualType DstType, SourceLocation Loc);
325 
326   /// Emit a check that an [implicit] conversion of an integer does not change
327   /// the sign of the value. It is not UB, so we use the value after conversion.
328   /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
329   void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
330                                   QualType DstType, SourceLocation Loc);
331 
332   /// Emit a conversion from the specified type to the specified destination
333   /// type, both of which are LLVM scalar types.
334   struct ScalarConversionOpts {
335     bool TreatBooleanAsSigned;
336     bool EmitImplicitIntegerTruncationChecks;
337     bool EmitImplicitIntegerSignChangeChecks;
338 
339     ScalarConversionOpts()
340         : TreatBooleanAsSigned(false),
341           EmitImplicitIntegerTruncationChecks(false),
342           EmitImplicitIntegerSignChangeChecks(false) {}
343 
344     ScalarConversionOpts(clang::SanitizerSet SanOpts)
345         : TreatBooleanAsSigned(false),
346           EmitImplicitIntegerTruncationChecks(
347               SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
348           EmitImplicitIntegerSignChangeChecks(
349               SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
350   };
351   Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
352                         llvm::Type *SrcTy, llvm::Type *DstTy,
353                         ScalarConversionOpts Opts);
354   Value *
355   EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
356                        SourceLocation Loc,
357                        ScalarConversionOpts Opts = ScalarConversionOpts());
358 
359   /// Convert between either a fixed point and other fixed point or fixed point
360   /// and an integer.
361   Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
362                                   SourceLocation Loc);
363 
364   /// Emit a conversion from the specified complex type to the specified
365   /// destination type, where the destination type is an LLVM scalar type.
366   Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
367                                        QualType SrcTy, QualType DstTy,
368                                        SourceLocation Loc);
369 
370   /// EmitNullValue - Emit a value that corresponds to null for the given type.
371   Value *EmitNullValue(QualType Ty);
372 
373   /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
374   Value *EmitFloatToBoolConversion(Value *V) {
375     // Compare against 0.0 for fp scalars.
376     llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
377     return Builder.CreateFCmpUNE(V, Zero, "tobool");
378   }
379 
380   /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
381   Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
382     Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
383 
384     return Builder.CreateICmpNE(V, Zero, "tobool");
385   }
386 
387   Value *EmitIntToBoolConversion(Value *V) {
388     // Because of the type rules of C, we often end up computing a
389     // logical value, then zero extending it to int, then wanting it
390     // as a logical value again.  Optimize this common case.
391     if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
392       if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
393         Value *Result = ZI->getOperand(0);
394         // If there aren't any more uses, zap the instruction to save space.
395         // Note that there can be more uses, for example if this
396         // is the result of an assignment.
397         if (ZI->use_empty())
398           ZI->eraseFromParent();
399         return Result;
400       }
401     }
402 
403     return Builder.CreateIsNotNull(V, "tobool");
404   }
405 
406   //===--------------------------------------------------------------------===//
407   //                            Visitor Methods
408   //===--------------------------------------------------------------------===//
409 
410   Value *Visit(Expr *E) {
411     ApplyDebugLocation DL(CGF, E);
412     return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
413   }
414 
415   Value *VisitStmt(Stmt *S) {
416     S->dump(llvm::errs(), CGF.getContext());
417     llvm_unreachable("Stmt can't have complex result type!");
418   }
419   Value *VisitExpr(Expr *S);
420 
421   Value *VisitConstantExpr(ConstantExpr *E) {
422     if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
423       if (E->isGLValue())
424         return CGF.Builder.CreateLoad(Address(
425             Result, CGF.getContext().getTypeAlignInChars(E->getType())));
426       return Result;
427     }
428     return Visit(E->getSubExpr());
429   }
430   Value *VisitParenExpr(ParenExpr *PE) {
431     return Visit(PE->getSubExpr());
432   }
433   Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
434     return Visit(E->getReplacement());
435   }
436   Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
437     return Visit(GE->getResultExpr());
438   }
439   Value *VisitCoawaitExpr(CoawaitExpr *S) {
440     return CGF.EmitCoawaitExpr(*S).getScalarVal();
441   }
442   Value *VisitCoyieldExpr(CoyieldExpr *S) {
443     return CGF.EmitCoyieldExpr(*S).getScalarVal();
444   }
445   Value *VisitUnaryCoawait(const UnaryOperator *E) {
446     return Visit(E->getSubExpr());
447   }
448 
449   // Leaves.
450   Value *VisitIntegerLiteral(const IntegerLiteral *E) {
451     return Builder.getInt(E->getValue());
452   }
453   Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
454     return Builder.getInt(E->getValue());
455   }
456   Value *VisitFloatingLiteral(const FloatingLiteral *E) {
457     return llvm::ConstantFP::get(VMContext, E->getValue());
458   }
459   Value *VisitCharacterLiteral(const CharacterLiteral *E) {
460     return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
461   }
462   Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
463     return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
464   }
465   Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
466     return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
467   }
468   Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
469     return EmitNullValue(E->getType());
470   }
471   Value *VisitGNUNullExpr(const GNUNullExpr *E) {
472     return EmitNullValue(E->getType());
473   }
474   Value *VisitOffsetOfExpr(OffsetOfExpr *E);
475   Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
476   Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
477     llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
478     return Builder.CreateBitCast(V, ConvertType(E->getType()));
479   }
480 
481   Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
482     return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
483   }
484 
485   Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
486     return CGF.EmitPseudoObjectRValue(E).getScalarVal();
487   }
488 
489   Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
490     if (E->isGLValue())
491       return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
492                               E->getExprLoc());
493 
494     // Otherwise, assume the mapping is the scalar directly.
495     return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
496   }
497 
498   // l-values.
499   Value *VisitDeclRefExpr(DeclRefExpr *E) {
500     if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
501       return CGF.emitScalarConstant(Constant, E);
502     return EmitLoadOfLValue(E);
503   }
504 
505   Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
506     return CGF.EmitObjCSelectorExpr(E);
507   }
508   Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
509     return CGF.EmitObjCProtocolExpr(E);
510   }
511   Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
512     return EmitLoadOfLValue(E);
513   }
514   Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
515     if (E->getMethodDecl() &&
516         E->getMethodDecl()->getReturnType()->isReferenceType())
517       return EmitLoadOfLValue(E);
518     return CGF.EmitObjCMessageExpr(E).getScalarVal();
519   }
520 
521   Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
522     LValue LV = CGF.EmitObjCIsaExpr(E);
523     Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
524     return V;
525   }
526 
527   Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
528     VersionTuple Version = E->getVersion();
529 
530     // If we're checking for a platform older than our minimum deployment
531     // target, we can fold the check away.
532     if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
533       return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
534 
535     return CGF.EmitBuiltinAvailable(Version);
536   }
537 
538   Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
539   Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
540   Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
541   Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
542   Value *VisitMemberExpr(MemberExpr *E);
543   Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
544   Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
545     // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
546     // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
547     // literals aren't l-values in C++. We do so simply because that's the
548     // cleanest way to handle compound literals in C++.
549     // See the discussion here: https://reviews.llvm.org/D64464
550     return EmitLoadOfLValue(E);
551   }
552 
553   Value *VisitInitListExpr(InitListExpr *E);
554 
555   Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
556     assert(CGF.getArrayInitIndex() &&
557            "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
558     return CGF.getArrayInitIndex();
559   }
560 
561   Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
562     return EmitNullValue(E->getType());
563   }
564   Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
565     CGF.CGM.EmitExplicitCastExprType(E, &CGF);
566     return VisitCastExpr(E);
567   }
568   Value *VisitCastExpr(CastExpr *E);
569 
570   Value *VisitCallExpr(const CallExpr *E) {
571     if (E->getCallReturnType(CGF.getContext())->isReferenceType())
572       return EmitLoadOfLValue(E);
573 
574     Value *V = CGF.EmitCallExpr(E).getScalarVal();
575 
576     EmitLValueAlignmentAssumption(E, V);
577     return V;
578   }
579 
580   Value *VisitStmtExpr(const StmtExpr *E);
581 
582   // Unary Operators.
583   Value *VisitUnaryPostDec(const UnaryOperator *E) {
584     LValue LV = EmitLValue(E->getSubExpr());
585     return EmitScalarPrePostIncDec(E, LV, false, false);
586   }
587   Value *VisitUnaryPostInc(const UnaryOperator *E) {
588     LValue LV = EmitLValue(E->getSubExpr());
589     return EmitScalarPrePostIncDec(E, LV, true, false);
590   }
591   Value *VisitUnaryPreDec(const UnaryOperator *E) {
592     LValue LV = EmitLValue(E->getSubExpr());
593     return EmitScalarPrePostIncDec(E, LV, false, true);
594   }
595   Value *VisitUnaryPreInc(const UnaryOperator *E) {
596     LValue LV = EmitLValue(E->getSubExpr());
597     return EmitScalarPrePostIncDec(E, LV, true, true);
598   }
599 
600   llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
601                                                   llvm::Value *InVal,
602                                                   bool IsInc);
603 
604   llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
605                                        bool isInc, bool isPre);
606 
607 
608   Value *VisitUnaryAddrOf(const UnaryOperator *E) {
609     if (isa<MemberPointerType>(E->getType())) // never sugared
610       return CGF.CGM.getMemberPointerConstant(E);
611 
612     return EmitLValue(E->getSubExpr()).getPointer(CGF);
613   }
614   Value *VisitUnaryDeref(const UnaryOperator *E) {
615     if (E->getType()->isVoidType())
616       return Visit(E->getSubExpr()); // the actual value should be unused
617     return EmitLoadOfLValue(E);
618   }
619   Value *VisitUnaryPlus(const UnaryOperator *E) {
620     // This differs from gcc, though, most likely due to a bug in gcc.
621     TestAndClearIgnoreResultAssign();
622     return Visit(E->getSubExpr());
623   }
624   Value *VisitUnaryMinus    (const UnaryOperator *E);
625   Value *VisitUnaryNot      (const UnaryOperator *E);
626   Value *VisitUnaryLNot     (const UnaryOperator *E);
627   Value *VisitUnaryReal     (const UnaryOperator *E);
628   Value *VisitUnaryImag     (const UnaryOperator *E);
629   Value *VisitUnaryExtension(const UnaryOperator *E) {
630     return Visit(E->getSubExpr());
631   }
632 
633   // C++
634   Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
635     return EmitLoadOfLValue(E);
636   }
637   Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
638     auto &Ctx = CGF.getContext();
639     APValue Evaluated =
640         SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
641     return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
642                                              SLE->getType());
643   }
644 
645   Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
646     CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
647     return Visit(DAE->getExpr());
648   }
649   Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
650     CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
651     return Visit(DIE->getExpr());
652   }
653   Value *VisitCXXThisExpr(CXXThisExpr *TE) {
654     return CGF.LoadCXXThis();
655   }
656 
657   Value *VisitExprWithCleanups(ExprWithCleanups *E);
658   Value *VisitCXXNewExpr(const CXXNewExpr *E) {
659     return CGF.EmitCXXNewExpr(E);
660   }
661   Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
662     CGF.EmitCXXDeleteExpr(E);
663     return nullptr;
664   }
665 
666   Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
667     return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
668   }
669 
670   Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
671     return Builder.getInt1(E->isSatisfied());
672   }
673 
674   Value *VisitRequiresExpr(const RequiresExpr *E) {
675     return Builder.getInt1(E->isSatisfied());
676   }
677 
678   Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
679     return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
680   }
681 
682   Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
683     return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
684   }
685 
686   Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
687     // C++ [expr.pseudo]p1:
688     //   The result shall only be used as the operand for the function call
689     //   operator (), and the result of such a call has type void. The only
690     //   effect is the evaluation of the postfix-expression before the dot or
691     //   arrow.
692     CGF.EmitScalarExpr(E->getBase());
693     return nullptr;
694   }
695 
696   Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
697     return EmitNullValue(E->getType());
698   }
699 
700   Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
701     CGF.EmitCXXThrowExpr(E);
702     return nullptr;
703   }
704 
705   Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
706     return Builder.getInt1(E->getValue());
707   }
708 
709   // Binary Operators.
710   Value *EmitMul(const BinOpInfo &Ops) {
711     if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
712       switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
713       case LangOptions::SOB_Defined:
714         return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
715       case LangOptions::SOB_Undefined:
716         if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
717           return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
718         LLVM_FALLTHROUGH;
719       case LangOptions::SOB_Trapping:
720         if (CanElideOverflowCheck(CGF.getContext(), Ops))
721           return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
722         return EmitOverflowCheckedBinOp(Ops);
723       }
724     }
725 
726     if (Ops.Ty->isConstantMatrixType()) {
727       llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
728       // We need to check the types of the operands of the operator to get the
729       // correct matrix dimensions.
730       auto *BO = cast<BinaryOperator>(Ops.E);
731       auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
732           BO->getLHS()->getType().getCanonicalType());
733       auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
734           BO->getRHS()->getType().getCanonicalType());
735       if (LHSMatTy && RHSMatTy)
736         return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
737                                        LHSMatTy->getNumColumns(),
738                                        RHSMatTy->getNumColumns());
739       return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
740     }
741 
742     if (Ops.Ty->isUnsignedIntegerType() &&
743         CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
744         !CanElideOverflowCheck(CGF.getContext(), Ops))
745       return EmitOverflowCheckedBinOp(Ops);
746 
747     if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
748       //  Preserve the old values
749       CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
750       return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
751     }
752     if (Ops.isFixedPointOp())
753       return EmitFixedPointBinOp(Ops);
754     return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
755   }
756   /// Create a binary op that checks for overflow.
757   /// Currently only supports +, - and *.
758   Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
759 
760   // Check for undefined division and modulus behaviors.
761   void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
762                                                   llvm::Value *Zero,bool isDiv);
763   // Common helper for getting how wide LHS of shift is.
764   static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
765 
766   // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
767   // non powers of two.
768   Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);
769 
770   Value *EmitDiv(const BinOpInfo &Ops);
771   Value *EmitRem(const BinOpInfo &Ops);
772   Value *EmitAdd(const BinOpInfo &Ops);
773   Value *EmitSub(const BinOpInfo &Ops);
774   Value *EmitShl(const BinOpInfo &Ops);
775   Value *EmitShr(const BinOpInfo &Ops);
776   Value *EmitAnd(const BinOpInfo &Ops) {
777     return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
778   }
779   Value *EmitXor(const BinOpInfo &Ops) {
780     return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
781   }
782   Value *EmitOr (const BinOpInfo &Ops) {
783     return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
784   }
785 
786   // Helper functions for fixed point binary operations.
787   Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
788 
789   BinOpInfo EmitBinOps(const BinaryOperator *E);
790   LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
791                             Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
792                                   Value *&Result);
793 
794   Value *EmitCompoundAssign(const CompoundAssignOperator *E,
795                             Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
796 
797   // Binary operators and binary compound assignment operators.
798 #define HANDLEBINOP(OP) \
799   Value *VisitBin ## OP(const BinaryOperator *E) {                         \
800     return Emit ## OP(EmitBinOps(E));                                      \
801   }                                                                        \
802   Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) {       \
803     return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);          \
804   }
805   HANDLEBINOP(Mul)
806   HANDLEBINOP(Div)
807   HANDLEBINOP(Rem)
808   HANDLEBINOP(Add)
809   HANDLEBINOP(Sub)
810   HANDLEBINOP(Shl)
811   HANDLEBINOP(Shr)
812   HANDLEBINOP(And)
813   HANDLEBINOP(Xor)
814   HANDLEBINOP(Or)
815 #undef HANDLEBINOP
816 
817   // Comparisons.
818   Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
819                      llvm::CmpInst::Predicate SICmpOpc,
820                      llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
821 #define VISITCOMP(CODE, UI, SI, FP, SIG) \
822     Value *VisitBin##CODE(const BinaryOperator *E) { \
823       return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
824                          llvm::FCmpInst::FP, SIG); }
825   VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
826   VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
827   VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
828   VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
829   VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
830   VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
831 #undef VISITCOMP
832 
833   Value *VisitBinAssign     (const BinaryOperator *E);
834 
835   Value *VisitBinLAnd       (const BinaryOperator *E);
836   Value *VisitBinLOr        (const BinaryOperator *E);
837   Value *VisitBinComma      (const BinaryOperator *E);
838 
839   Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
840   Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
841 
842   Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
843     return Visit(E->getSemanticForm());
844   }
845 
846   // Other Operators.
847   Value *VisitBlockExpr(const BlockExpr *BE);
848   Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
849   Value *VisitChooseExpr(ChooseExpr *CE);
850   Value *VisitVAArgExpr(VAArgExpr *VE);
851   Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
852     return CGF.EmitObjCStringLiteral(E);
853   }
854   Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
855     return CGF.EmitObjCBoxedExpr(E);
856   }
857   Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
858     return CGF.EmitObjCArrayLiteral(E);
859   }
860   Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
861     return CGF.EmitObjCDictionaryLiteral(E);
862   }
863   Value *VisitAsTypeExpr(AsTypeExpr *CE);
864   Value *VisitAtomicExpr(AtomicExpr *AE);
865 };
866 }  // end anonymous namespace.
867 
868 //===----------------------------------------------------------------------===//
869 //                                Utilities
870 //===----------------------------------------------------------------------===//
871 
872 /// EmitConversionToBool - Convert the specified expression value to a
873 /// boolean (i1) truth value.  This is equivalent to "Val != 0".
874 Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
875   assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
876 
877   if (SrcType->isRealFloatingType())
878     return EmitFloatToBoolConversion(Src);
879 
880   if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
881     return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
882 
883   assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
884          "Unknown scalar type to convert");
885 
886   if (isa<llvm::IntegerType>(Src->getType()))
887     return EmitIntToBoolConversion(Src);
888 
889   assert(isa<llvm::PointerType>(Src->getType()));
890   return EmitPointerToBoolConversion(Src, SrcType);
891 }
892 
893 void ScalarExprEmitter::EmitFloatConversionCheck(
894     Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
895     QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
896   assert(SrcType->isFloatingType() && "not a conversion from floating point");
897   if (!isa<llvm::IntegerType>(DstTy))
898     return;
899 
900   CodeGenFunction::SanitizerScope SanScope(&CGF);
901   using llvm::APFloat;
902   using llvm::APSInt;
903 
904   llvm::Value *Check = nullptr;
905   const llvm::fltSemantics &SrcSema =
906     CGF.getContext().getFloatTypeSemantics(OrigSrcType);
907 
908   // Floating-point to integer. This has undefined behavior if the source is
909   // +-Inf, NaN, or doesn't fit into the destination type (after truncation
910   // to an integer).
911   unsigned Width = CGF.getContext().getIntWidth(DstType);
912   bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
913 
914   APSInt Min = APSInt::getMinValue(Width, Unsigned);
915   APFloat MinSrc(SrcSema, APFloat::uninitialized);
916   if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
917       APFloat::opOverflow)
918     // Don't need an overflow check for lower bound. Just check for
919     // -Inf/NaN.
920     MinSrc = APFloat::getInf(SrcSema, true);
921   else
922     // Find the largest value which is too small to represent (before
923     // truncation toward zero).
924     MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
925 
926   APSInt Max = APSInt::getMaxValue(Width, Unsigned);
927   APFloat MaxSrc(SrcSema, APFloat::uninitialized);
928   if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
929       APFloat::opOverflow)
930     // Don't need an overflow check for upper bound. Just check for
931     // +Inf/NaN.
932     MaxSrc = APFloat::getInf(SrcSema, false);
933   else
934     // Find the smallest value which is too large to represent (before
935     // truncation toward zero).
936     MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
937 
938   // If we're converting from __half, convert the range to float to match
939   // the type of src.
940   if (OrigSrcType->isHalfType()) {
941     const llvm::fltSemantics &Sema =
942       CGF.getContext().getFloatTypeSemantics(SrcType);
943     bool IsInexact;
944     MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
945     MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
946   }
947 
948   llvm::Value *GE =
949     Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
950   llvm::Value *LE =
951     Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
952   Check = Builder.CreateAnd(GE, LE);
953 
954   llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
955                                   CGF.EmitCheckTypeDescriptor(OrigSrcType),
956                                   CGF.EmitCheckTypeDescriptor(DstType)};
957   CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
958                 SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
959 }
960 
961 // Should be called within CodeGenFunction::SanitizerScope RAII scope.
962 // Returns 'i1 false' when the truncation Src -> Dst was lossy.
963 static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
964                  std::pair<llvm::Value *, SanitizerMask>>
965 EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
966                                  QualType DstType, CGBuilderTy &Builder) {
967   llvm::Type *SrcTy = Src->getType();
968   llvm::Type *DstTy = Dst->getType();
969   (void)DstTy; // Only used in assert()
970 
971   // This should be truncation of integral types.
972   assert(Src != Dst);
973   assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
974   assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
975          "non-integer llvm type");
976 
977   bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
978   bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
979 
980   // If both (src and dst) types are unsigned, then it's an unsigned truncation.
981   // Else, it is a signed truncation.
982   ScalarExprEmitter::ImplicitConversionCheckKind Kind;
983   SanitizerMask Mask;
984   if (!SrcSigned && !DstSigned) {
985     Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
986     Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
987   } else {
988     Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
989     Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
990   }
991 
992   llvm::Value *Check = nullptr;
993   // 1. Extend the truncated value back to the same width as the Src.
994   Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
995   // 2. Equality-compare with the original source value
996   Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
997   // If the comparison result is 'i1 false', then the truncation was lossy.
998   return std::make_pair(Kind, std::make_pair(Check, Mask));
999 }
1000 
1001 static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
1002     QualType SrcType, QualType DstType) {
1003   return SrcType->isIntegerType() && DstType->isIntegerType();
1004 }
1005 
1006 void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1007                                                    Value *Dst, QualType DstType,
1008                                                    SourceLocation Loc) {
1009   if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1010     return;
1011 
1012   // We only care about int->int conversions here.
1013   // We ignore conversions to/from pointer and/or bool.
1014   if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1015                                                                        DstType))
1016     return;
1017 
1018   unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1019   unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1020   // This must be truncation. Else we do not care.
1021   if (SrcBits <= DstBits)
1022     return;
1023 
1024   assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1025 
1026   // If the integer sign change sanitizer is enabled,
1027   // and we are truncating from larger unsigned type to smaller signed type,
1028   // let that next sanitizer deal with it.
1029   bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1030   bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1031   if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1032       (!SrcSigned && DstSigned))
1033     return;
1034 
1035   CodeGenFunction::SanitizerScope SanScope(&CGF);
1036 
1037   std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1038             std::pair<llvm::Value *, SanitizerMask>>
1039       Check =
1040           EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1041   // If the comparison result is 'i1 false', then the truncation was lossy.
1042 
1043   // Do we care about this type of truncation?
1044   if (!CGF.SanOpts.has(Check.second.second))
1045     return;
1046 
1047   llvm::Constant *StaticArgs[] = {
1048       CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1049       CGF.EmitCheckTypeDescriptor(DstType),
1050       llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
1051   CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
1052                 {Src, Dst});
1053 }
1054 
1055 // Should be called within CodeGenFunction::SanitizerScope RAII scope.
1056 // Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1057 static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1058                  std::pair<llvm::Value *, SanitizerMask>>
1059 EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1060                                  QualType DstType, CGBuilderTy &Builder) {
1061   llvm::Type *SrcTy = Src->getType();
1062   llvm::Type *DstTy = Dst->getType();
1063 
1064   assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1065          "non-integer llvm type");
1066 
1067   bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1068   bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1069   (void)SrcSigned; // Only used in assert()
1070   (void)DstSigned; // Only used in assert()
1071   unsigned SrcBits = SrcTy->getScalarSizeInBits();
1072   unsigned DstBits = DstTy->getScalarSizeInBits();
1073   (void)SrcBits; // Only used in assert()
1074   (void)DstBits; // Only used in assert()
1075 
1076   assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1077          "either the widths should be different, or the signednesses.");
1078 
1079   // NOTE: zero value is considered to be non-negative.
1080   auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
1081                                        const char *Name) -> Value * {
1082     // Is this value a signed type?
1083     bool VSigned = VType->isSignedIntegerOrEnumerationType();
1084     llvm::Type *VTy = V->getType();
1085     if (!VSigned) {
1086       // If the value is unsigned, then it is never negative.
1087       // FIXME: can we encounter non-scalar VTy here?
1088       return llvm::ConstantInt::getFalse(VTy->getContext());
1089     }
1090     // Get the zero of the same type with which we will be comparing.
1091     llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1092     // %V.isnegative = icmp slt %V, 0
1093     // I.e is %V *strictly* less than zero, does it have negative value?
1094     return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1095                               llvm::Twine(Name) + "." + V->getName() +
1096                                   ".negativitycheck");
1097   };
1098 
1099   // 1. Was the old Value negative?
1100   llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
1101   // 2. Is the new Value negative?
1102   llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
1103   // 3. Now, was the 'negativity status' preserved during the conversion?
1104   //    NOTE: conversion from negative to zero is considered to change the sign.
1105   //    (We want to get 'false' when the conversion changed the sign)
1106   //    So we should just equality-compare the negativity statuses.
1107   llvm::Value *Check = nullptr;
1108   Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1109   // If the comparison result is 'false', then the conversion changed the sign.
1110   return std::make_pair(
1111       ScalarExprEmitter::ICCK_IntegerSignChange,
1112       std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
1113 }
1114 
1115 void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1116                                                    Value *Dst, QualType DstType,
1117                                                    SourceLocation Loc) {
1118   if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
1119     return;
1120 
1121   llvm::Type *SrcTy = Src->getType();
1122   llvm::Type *DstTy = Dst->getType();
1123 
1124   // We only care about int->int conversions here.
1125   // We ignore conversions to/from pointer and/or bool.
1126   if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1127                                                                        DstType))
1128     return;
1129 
1130   bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1131   bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1132   unsigned SrcBits = SrcTy->getScalarSizeInBits();
1133   unsigned DstBits = DstTy->getScalarSizeInBits();
1134 
1135   // Now, we do not need to emit the check in *all* of the cases.
1136   // We can avoid emitting it in some obvious cases where it would have been
1137   // dropped by the opt passes (instcombine) always anyways.
1138   // If it's a cast between effectively the same type, no check.
1139   // NOTE: this is *not* equivalent to checking the canonical types.
1140   if (SrcSigned == DstSigned && SrcBits == DstBits)
1141     return;
1142   // At least one of the values needs to have signed type.
1143   // If both are unsigned, then obviously, neither of them can be negative.
1144   if (!SrcSigned && !DstSigned)
1145     return;
1146   // If the conversion is to *larger* *signed* type, then no check is needed.
1147   // Because either sign-extension happens (so the sign will remain),
1148   // or zero-extension will happen (the sign bit will be zero.)
1149   if ((DstBits > SrcBits) && DstSigned)
1150     return;
1151   if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1152       (SrcBits > DstBits) && SrcSigned) {
1153     // If the signed integer truncation sanitizer is enabled,
1154     // and this is a truncation from signed type, then no check is needed.
1155     // Because here sign change check is interchangeable with truncation check.
1156     return;
1157   }
1158   // That's it. We can't rule out any more cases with the data we have.
1159 
1160   CodeGenFunction::SanitizerScope SanScope(&CGF);
1161 
1162   std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1163             std::pair<llvm::Value *, SanitizerMask>>
1164       Check;
1165 
1166   // Each of these checks needs to return 'false' when an issue was detected.
1167   ImplicitConversionCheckKind CheckKind;
1168   llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
1169   // So we can 'and' all the checks together, and still get 'false',
1170   // if at least one of the checks detected an issue.
1171 
1172   Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1173   CheckKind = Check.first;
1174   Checks.emplace_back(Check.second);
1175 
1176   if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1177       (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1178     // If the signed integer truncation sanitizer was enabled,
1179     // and we are truncating from larger unsigned type to smaller signed type,
1180     // let's handle the case we skipped in that check.
1181     Check =
1182         EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1183     CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1184     Checks.emplace_back(Check.second);
1185     // If the comparison result is 'i1 false', then the truncation was lossy.
1186   }
1187 
1188   llvm::Constant *StaticArgs[] = {
1189       CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1190       CGF.EmitCheckTypeDescriptor(DstType),
1191       llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
1192   // EmitCheck() will 'and' all the checks together.
1193   CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
1194                 {Src, Dst});
1195 }
1196 
1197 Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1198                                          QualType DstType, llvm::Type *SrcTy,
1199                                          llvm::Type *DstTy,
1200                                          ScalarConversionOpts Opts) {
1201   // The Element types determine the type of cast to perform.
1202   llvm::Type *SrcElementTy;
1203   llvm::Type *DstElementTy;
1204   QualType SrcElementType;
1205   QualType DstElementType;
1206   if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1207     // Allow bitcast between matrixes of the same size.
1208     if (SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits())
1209       return Builder.CreateBitCast(Src, DstTy, "conv");
1210 
1211     SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1212     DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1213     SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1214     DstElementType = DstType->castAs<MatrixType>()->getElementType();
1215   } else {
1216     assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1217            "cannot cast between matrix and non-matrix types");
1218     SrcElementTy = SrcTy;
1219     DstElementTy = DstTy;
1220     SrcElementType = SrcType;
1221     DstElementType = DstType;
1222   }
1223 
1224   if (isa<llvm::IntegerType>(SrcElementTy)) {
1225     bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1226     if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1227       InputSigned = true;
1228     }
1229 
1230     if (isa<llvm::IntegerType>(DstElementTy))
1231       return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1232     if (InputSigned)
1233       return Builder.CreateSIToFP(Src, DstTy, "conv");
1234     return Builder.CreateUIToFP(Src, DstTy, "conv");
1235   }
1236 
1237   if (isa<llvm::IntegerType>(DstElementTy)) {
1238     assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1239     if (DstElementType->isSignedIntegerOrEnumerationType())
1240       return Builder.CreateFPToSI(Src, DstTy, "conv");
1241     return Builder.CreateFPToUI(Src, DstTy, "conv");
1242   }
1243 
1244   if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1245     return Builder.CreateFPTrunc(Src, DstTy, "conv");
1246   return Builder.CreateFPExt(Src, DstTy, "conv");
1247 }
1248 
1249 /// Emit a conversion from the specified type to the specified destination type,
1250 /// both of which are LLVM scalar types.
1251 Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1252                                                QualType DstType,
1253                                                SourceLocation Loc,
1254                                                ScalarConversionOpts Opts) {
1255   // All conversions involving fixed point types should be handled by the
1256   // EmitFixedPoint family functions. This is done to prevent bloating up this
1257   // function more, and although fixed point numbers are represented by
1258   // integers, we do not want to follow any logic that assumes they should be
1259   // treated as integers.
1260   // TODO(leonardchan): When necessary, add another if statement checking for
1261   // conversions to fixed point types from other types.
1262   if (SrcType->isFixedPointType()) {
1263     if (DstType->isBooleanType())
1264       // It is important that we check this before checking if the dest type is
1265       // an integer because booleans are technically integer types.
1266       // We do not need to check the padding bit on unsigned types if unsigned
1267       // padding is enabled because overflow into this bit is undefined
1268       // behavior.
1269       return Builder.CreateIsNotNull(Src, "tobool");
1270     if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1271         DstType->isRealFloatingType())
1272       return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1273 
1274     llvm_unreachable(
1275         "Unhandled scalar conversion from a fixed point type to another type.");
1276   } else if (DstType->isFixedPointType()) {
1277     if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1278       // This also includes converting booleans and enums to fixed point types.
1279       return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1280 
1281     llvm_unreachable(
1282         "Unhandled scalar conversion to a fixed point type from another type.");
1283   }
1284 
1285   QualType NoncanonicalSrcType = SrcType;
1286   QualType NoncanonicalDstType = DstType;
1287 
1288   SrcType = CGF.getContext().getCanonicalType(SrcType);
1289   DstType = CGF.getContext().getCanonicalType(DstType);
1290   if (SrcType == DstType) return Src;
1291 
1292   if (DstType->isVoidType()) return nullptr;
1293 
1294   llvm::Value *OrigSrc = Src;
1295   QualType OrigSrcType = SrcType;
1296   llvm::Type *SrcTy = Src->getType();
1297 
1298   // Handle conversions to bool first, they are special: comparisons against 0.
1299   if (DstType->isBooleanType())
1300     return EmitConversionToBool(Src, SrcType);
1301 
1302   llvm::Type *DstTy = ConvertType(DstType);
1303 
1304   // Cast from half through float if half isn't a native type.
1305   if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1306     // Cast to FP using the intrinsic if the half type itself isn't supported.
1307     if (DstTy->isFloatingPointTy()) {
1308       if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1309         return Builder.CreateCall(
1310             CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1311             Src);
1312     } else {
1313       // Cast to other types through float, using either the intrinsic or FPExt,
1314       // depending on whether the half type itself is supported
1315       // (as opposed to operations on half, available with NativeHalfType).
1316       if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1317         Src = Builder.CreateCall(
1318             CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1319                                  CGF.CGM.FloatTy),
1320             Src);
1321       } else {
1322         Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1323       }
1324       SrcType = CGF.getContext().FloatTy;
1325       SrcTy = CGF.FloatTy;
1326     }
1327   }
1328 
1329   // Ignore conversions like int -> uint.
1330   if (SrcTy == DstTy) {
1331     if (Opts.EmitImplicitIntegerSignChangeChecks)
1332       EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1333                                  NoncanonicalDstType, Loc);
1334 
1335     return Src;
1336   }
1337 
1338   // Handle pointer conversions next: pointers can only be converted to/from
1339   // other pointers and integers. Check for pointer types in terms of LLVM, as
1340   // some native types (like Obj-C id) may map to a pointer type.
1341   if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1342     // The source value may be an integer, or a pointer.
1343     if (isa<llvm::PointerType>(SrcTy))
1344       return Builder.CreateBitCast(Src, DstTy, "conv");
1345 
1346     assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1347     // First, convert to the correct width so that we control the kind of
1348     // extension.
1349     llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1350     bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1351     llvm::Value* IntResult =
1352         Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1353     // Then, cast to pointer.
1354     return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1355   }
1356 
1357   if (isa<llvm::PointerType>(SrcTy)) {
1358     // Must be an ptr to int cast.
1359     assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1360     return Builder.CreatePtrToInt(Src, DstTy, "conv");
1361   }
1362 
1363   // A scalar can be splatted to an extended vector of the same element type
1364   if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1365     // Sema should add casts to make sure that the source expression's type is
1366     // the same as the vector's element type (sans qualifiers)
1367     assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1368                SrcType.getTypePtr() &&
1369            "Splatted expr doesn't match with vector element type?");
1370 
1371     // Splat the element across to all elements
1372     unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1373     return Builder.CreateVectorSplat(NumElements, Src, "splat");
1374   }
1375 
1376   if (SrcType->isMatrixType() && DstType->isMatrixType())
1377     return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1378 
1379   if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1380     // Allow bitcast from vector to integer/fp of the same size.
1381     unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
1382     unsigned DstSize = DstTy->getPrimitiveSizeInBits();
1383     if (SrcSize == DstSize)
1384       return Builder.CreateBitCast(Src, DstTy, "conv");
1385 
1386     // Conversions between vectors of different sizes are not allowed except
1387     // when vectors of half are involved. Operations on storage-only half
1388     // vectors require promoting half vector operands to float vectors and
1389     // truncating the result, which is either an int or float vector, to a
1390     // short or half vector.
1391 
1392     // Source and destination are both expected to be vectors.
1393     llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1394     llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1395     (void)DstElementTy;
1396 
1397     assert(((SrcElementTy->isIntegerTy() &&
1398              DstElementTy->isIntegerTy()) ||
1399             (SrcElementTy->isFloatingPointTy() &&
1400              DstElementTy->isFloatingPointTy())) &&
1401            "unexpected conversion between a floating-point vector and an "
1402            "integer vector");
1403 
1404     // Truncate an i32 vector to an i16 vector.
1405     if (SrcElementTy->isIntegerTy())
1406       return Builder.CreateIntCast(Src, DstTy, false, "conv");
1407 
1408     // Truncate a float vector to a half vector.
1409     if (SrcSize > DstSize)
1410       return Builder.CreateFPTrunc(Src, DstTy, "conv");
1411 
1412     // Promote a half vector to a float vector.
1413     return Builder.CreateFPExt(Src, DstTy, "conv");
1414   }
1415 
1416   // Finally, we have the arithmetic types: real int/float.
1417   Value *Res = nullptr;
1418   llvm::Type *ResTy = DstTy;
1419 
1420   // An overflowing conversion has undefined behavior if either the source type
1421   // or the destination type is a floating-point type. However, we consider the
1422   // range of representable values for all floating-point types to be
1423   // [-inf,+inf], so no overflow can ever happen when the destination type is a
1424   // floating-point type.
1425   if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1426       OrigSrcType->isFloatingType())
1427     EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1428                              Loc);
1429 
1430   // Cast to half through float if half isn't a native type.
1431   if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1432     // Make sure we cast in a single step if from another FP type.
1433     if (SrcTy->isFloatingPointTy()) {
1434       // Use the intrinsic if the half type itself isn't supported
1435       // (as opposed to operations on half, available with NativeHalfType).
1436       if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1437         return Builder.CreateCall(
1438             CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1439       // If the half type is supported, just use an fptrunc.
1440       return Builder.CreateFPTrunc(Src, DstTy);
1441     }
1442     DstTy = CGF.FloatTy;
1443   }
1444 
1445   Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1446 
1447   if (DstTy != ResTy) {
1448     if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1449       assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1450       Res = Builder.CreateCall(
1451         CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1452         Res);
1453     } else {
1454       Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1455     }
1456   }
1457 
1458   if (Opts.EmitImplicitIntegerTruncationChecks)
1459     EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1460                                NoncanonicalDstType, Loc);
1461 
1462   if (Opts.EmitImplicitIntegerSignChangeChecks)
1463     EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1464                                NoncanonicalDstType, Loc);
1465 
1466   return Res;
1467 }
1468 
1469 Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1470                                                    QualType DstTy,
1471                                                    SourceLocation Loc) {
1472   llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1473   llvm::Value *Result;
1474   if (SrcTy->isRealFloatingType())
1475     Result = FPBuilder.CreateFloatingToFixed(Src,
1476         CGF.getContext().getFixedPointSemantics(DstTy));
1477   else if (DstTy->isRealFloatingType())
1478     Result = FPBuilder.CreateFixedToFloating(Src,
1479         CGF.getContext().getFixedPointSemantics(SrcTy),
1480         ConvertType(DstTy));
1481   else {
1482     auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1483     auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1484 
1485     if (DstTy->isIntegerType())
1486       Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1487                                               DstFPSema.getWidth(),
1488                                               DstFPSema.isSigned());
1489     else if (SrcTy->isIntegerType())
1490       Result =  FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1491                                                DstFPSema);
1492     else
1493       Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1494   }
1495   return Result;
1496 }
1497 
1498 /// Emit a conversion from the specified complex type to the specified
1499 /// destination type, where the destination type is an LLVM scalar type.
1500 Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1501     CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1502     SourceLocation Loc) {
1503   // Get the source element type.
1504   SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1505 
1506   // Handle conversions to bool first, they are special: comparisons against 0.
1507   if (DstTy->isBooleanType()) {
1508     //  Complex != 0  -> (Real != 0) | (Imag != 0)
1509     Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1510     Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1511     return Builder.CreateOr(Src.first, Src.second, "tobool");
1512   }
1513 
1514   // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1515   // the imaginary part of the complex value is discarded and the value of the
1516   // real part is converted according to the conversion rules for the
1517   // corresponding real type.
1518   return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1519 }
1520 
1521 Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1522   return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1523 }
1524 
1525 /// Emit a sanitization check for the given "binary" operation (which
1526 /// might actually be a unary increment which has been lowered to a binary
1527 /// operation). The check passes if all values in \p Checks (which are \c i1),
1528 /// are \c true.
1529 void ScalarExprEmitter::EmitBinOpCheck(
1530     ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1531   assert(CGF.IsSanitizerScope);
1532   SanitizerHandler Check;
1533   SmallVector<llvm::Constant *, 4> StaticData;
1534   SmallVector<llvm::Value *, 2> DynamicData;
1535 
1536   BinaryOperatorKind Opcode = Info.Opcode;
1537   if (BinaryOperator::isCompoundAssignmentOp(Opcode))
1538     Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
1539 
1540   StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1541   const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1542   if (UO && UO->getOpcode() == UO_Minus) {
1543     Check = SanitizerHandler::NegateOverflow;
1544     StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1545     DynamicData.push_back(Info.RHS);
1546   } else {
1547     if (BinaryOperator::isShiftOp(Opcode)) {
1548       // Shift LHS negative or too large, or RHS out of bounds.
1549       Check = SanitizerHandler::ShiftOutOfBounds;
1550       const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1551       StaticData.push_back(
1552         CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1553       StaticData.push_back(
1554         CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1555     } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1556       // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1557       Check = SanitizerHandler::DivremOverflow;
1558       StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1559     } else {
1560       // Arithmetic overflow (+, -, *).
1561       switch (Opcode) {
1562       case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1563       case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1564       case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1565       default: llvm_unreachable("unexpected opcode for bin op check");
1566       }
1567       StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1568     }
1569     DynamicData.push_back(Info.LHS);
1570     DynamicData.push_back(Info.RHS);
1571   }
1572 
1573   CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
1574 }
1575 
1576 //===----------------------------------------------------------------------===//
1577 //                            Visitor Methods
1578 //===----------------------------------------------------------------------===//
1579 
1580 Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1581   CGF.ErrorUnsupported(E, "scalar expression");
1582   if (E->getType()->isVoidType())
1583     return nullptr;
1584   return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1585 }
1586 
1587 Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1588   // Vector Mask Case
1589   if (E->getNumSubExprs() == 2) {
1590     Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1591     Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1592     Value *Mask;
1593 
1594     auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
1595     unsigned LHSElts = LTy->getNumElements();
1596 
1597     Mask = RHS;
1598 
1599     auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
1600 
1601     // Mask off the high bits of each shuffle index.
1602     Value *MaskBits =
1603         llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1604     Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1605 
1606     // newv = undef
1607     // mask = mask & maskbits
1608     // for each elt
1609     //   n = extract mask i
1610     //   x = extract val n
1611     //   newv = insert newv, x, i
1612     auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
1613                                            MTy->getNumElements());
1614     Value* NewV = llvm::UndefValue::get(RTy);
1615     for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1616       Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1617       Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1618 
1619       Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1620       NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1621     }
1622     return NewV;
1623   }
1624 
1625   Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1626   Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1627 
1628   SmallVector<int, 32> Indices;
1629   for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1630     llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1631     // Check for -1 and output it as undef in the IR.
1632     if (Idx.isSigned() && Idx.isAllOnesValue())
1633       Indices.push_back(-1);
1634     else
1635       Indices.push_back(Idx.getZExtValue());
1636   }
1637 
1638   return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
1639 }
1640 
1641 Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1642   QualType SrcType = E->getSrcExpr()->getType(),
1643            DstType = E->getType();
1644 
1645   Value *Src  = CGF.EmitScalarExpr(E->getSrcExpr());
1646 
1647   SrcType = CGF.getContext().getCanonicalType(SrcType);
1648   DstType = CGF.getContext().getCanonicalType(DstType);
1649   if (SrcType == DstType) return Src;
1650 
1651   assert(SrcType->isVectorType() &&
1652          "ConvertVector source type must be a vector");
1653   assert(DstType->isVectorType() &&
1654          "ConvertVector destination type must be a vector");
1655 
1656   llvm::Type *SrcTy = Src->getType();
1657   llvm::Type *DstTy = ConvertType(DstType);
1658 
1659   // Ignore conversions like int -> uint.
1660   if (SrcTy == DstTy)
1661     return Src;
1662 
1663   QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1664            DstEltType = DstType->castAs<VectorType>()->getElementType();
1665 
1666   assert(SrcTy->isVectorTy() &&
1667          "ConvertVector source IR type must be a vector");
1668   assert(DstTy->isVectorTy() &&
1669          "ConvertVector destination IR type must be a vector");
1670 
1671   llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
1672              *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
1673 
1674   if (DstEltType->isBooleanType()) {
1675     assert((SrcEltTy->isFloatingPointTy() ||
1676             isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1677 
1678     llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1679     if (SrcEltTy->isFloatingPointTy()) {
1680       return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1681     } else {
1682       return Builder.CreateICmpNE(Src, Zero, "tobool");
1683     }
1684   }
1685 
1686   // We have the arithmetic types: real int/float.
1687   Value *Res = nullptr;
1688 
1689   if (isa<llvm::IntegerType>(SrcEltTy)) {
1690     bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1691     if (isa<llvm::IntegerType>(DstEltTy))
1692       Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1693     else if (InputSigned)
1694       Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1695     else
1696       Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1697   } else if (isa<llvm::IntegerType>(DstEltTy)) {
1698     assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1699     if (DstEltType->isSignedIntegerOrEnumerationType())
1700       Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1701     else
1702       Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1703   } else {
1704     assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1705            "Unknown real conversion");
1706     if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1707       Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1708     else
1709       Res = Builder.CreateFPExt(Src, DstTy, "conv");
1710   }
1711 
1712   return Res;
1713 }
1714 
1715 Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1716   if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1717     CGF.EmitIgnoredExpr(E->getBase());
1718     return CGF.emitScalarConstant(Constant, E);
1719   } else {
1720     Expr::EvalResult Result;
1721     if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1722       llvm::APSInt Value = Result.Val.getInt();
1723       CGF.EmitIgnoredExpr(E->getBase());
1724       return Builder.getInt(Value);
1725     }
1726   }
1727 
1728   return EmitLoadOfLValue(E);
1729 }
1730 
1731 Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1732   TestAndClearIgnoreResultAssign();
1733 
1734   // Emit subscript expressions in rvalue context's.  For most cases, this just
1735   // loads the lvalue formed by the subscript expr.  However, we have to be
1736   // careful, because the base of a vector subscript is occasionally an rvalue,
1737   // so we can't get it as an lvalue.
1738   if (!E->getBase()->getType()->isVectorType())
1739     return EmitLoadOfLValue(E);
1740 
1741   // Handle the vector case.  The base must be a vector, the index must be an
1742   // integer value.
1743   Value *Base = Visit(E->getBase());
1744   Value *Idx  = Visit(E->getIdx());
1745   QualType IdxTy = E->getIdx()->getType();
1746 
1747   if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1748     CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1749 
1750   return Builder.CreateExtractElement(Base, Idx, "vecext");
1751 }
1752 
1753 Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1754   TestAndClearIgnoreResultAssign();
1755 
1756   // Handle the vector case.  The base must be a vector, the index must be an
1757   // integer value.
1758   Value *RowIdx = Visit(E->getRowIdx());
1759   Value *ColumnIdx = Visit(E->getColumnIdx());
1760   Value *Matrix = Visit(E->getBase());
1761 
1762   // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
1763   llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
1764   return MB.CreateExtractElement(
1765       Matrix, RowIdx, ColumnIdx,
1766       E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
1767 }
1768 
1769 static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1770                       unsigned Off) {
1771   int MV = SVI->getMaskValue(Idx);
1772   if (MV == -1)
1773     return -1;
1774   return Off + MV;
1775 }
1776 
1777 static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1778   assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
1779          "Index operand too large for shufflevector mask!");
1780   return C->getZExtValue();
1781 }
1782 
1783 Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1784   bool Ignore = TestAndClearIgnoreResultAssign();
1785   (void)Ignore;
1786   assert (Ignore == false && "init list ignored");
1787   unsigned NumInitElements = E->getNumInits();
1788 
1789   if (E->hadArrayRangeDesignator())
1790     CGF.ErrorUnsupported(E, "GNU array range designator extension");
1791 
1792   llvm::VectorType *VType =
1793     dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
1794 
1795   if (!VType) {
1796     if (NumInitElements == 0) {
1797       // C++11 value-initialization for the scalar.
1798       return EmitNullValue(E->getType());
1799     }
1800     // We have a scalar in braces. Just use the first element.
1801     return Visit(E->getInit(0));
1802   }
1803 
1804   unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
1805 
1806   // Loop over initializers collecting the Value for each, and remembering
1807   // whether the source was swizzle (ExtVectorElementExpr).  This will allow
1808   // us to fold the shuffle for the swizzle into the shuffle for the vector
1809   // initializer, since LLVM optimizers generally do not want to touch
1810   // shuffles.
1811   unsigned CurIdx = 0;
1812   bool VIsUndefShuffle = false;
1813   llvm::Value *V = llvm::UndefValue::get(VType);
1814   for (unsigned i = 0; i != NumInitElements; ++i) {
1815     Expr *IE = E->getInit(i);
1816     Value *Init = Visit(IE);
1817     SmallVector<int, 16> Args;
1818 
1819     llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
1820 
1821     // Handle scalar elements.  If the scalar initializer is actually one
1822     // element of a different vector of the same width, use shuffle instead of
1823     // extract+insert.
1824     if (!VVT) {
1825       if (isa<ExtVectorElementExpr>(IE)) {
1826         llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
1827 
1828         if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
1829                 ->getNumElements() == ResElts) {
1830           llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
1831           Value *LHS = nullptr, *RHS = nullptr;
1832           if (CurIdx == 0) {
1833             // insert into undef -> shuffle (src, undef)
1834             // shufflemask must use an i32
1835             Args.push_back(getAsInt32(C, CGF.Int32Ty));
1836             Args.resize(ResElts, -1);
1837 
1838             LHS = EI->getVectorOperand();
1839             RHS = V;
1840             VIsUndefShuffle = true;
1841           } else if (VIsUndefShuffle) {
1842             // insert into undefshuffle && size match -> shuffle (v, src)
1843             llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
1844             for (unsigned j = 0; j != CurIdx; ++j)
1845               Args.push_back(getMaskElt(SVV, j, 0));
1846             Args.push_back(ResElts + C->getZExtValue());
1847             Args.resize(ResElts, -1);
1848 
1849             LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1850             RHS = EI->getVectorOperand();
1851             VIsUndefShuffle = false;
1852           }
1853           if (!Args.empty()) {
1854             V = Builder.CreateShuffleVector(LHS, RHS, Args);
1855             ++CurIdx;
1856             continue;
1857           }
1858         }
1859       }
1860       V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
1861                                       "vecinit");
1862       VIsUndefShuffle = false;
1863       ++CurIdx;
1864       continue;
1865     }
1866 
1867     unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
1868 
1869     // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
1870     // input is the same width as the vector being constructed, generate an
1871     // optimized shuffle of the swizzle input into the result.
1872     unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
1873     if (isa<ExtVectorElementExpr>(IE)) {
1874       llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
1875       Value *SVOp = SVI->getOperand(0);
1876       auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
1877 
1878       if (OpTy->getNumElements() == ResElts) {
1879         for (unsigned j = 0; j != CurIdx; ++j) {
1880           // If the current vector initializer is a shuffle with undef, merge
1881           // this shuffle directly into it.
1882           if (VIsUndefShuffle) {
1883             Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
1884           } else {
1885             Args.push_back(j);
1886           }
1887         }
1888         for (unsigned j = 0, je = InitElts; j != je; ++j)
1889           Args.push_back(getMaskElt(SVI, j, Offset));
1890         Args.resize(ResElts, -1);
1891 
1892         if (VIsUndefShuffle)
1893           V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1894 
1895         Init = SVOp;
1896       }
1897     }
1898 
1899     // Extend init to result vector length, and then shuffle its contribution
1900     // to the vector initializer into V.
1901     if (Args.empty()) {
1902       for (unsigned j = 0; j != InitElts; ++j)
1903         Args.push_back(j);
1904       Args.resize(ResElts, -1);
1905       Init = Builder.CreateShuffleVector(Init, Args, "vext");
1906 
1907       Args.clear();
1908       for (unsigned j = 0; j != CurIdx; ++j)
1909         Args.push_back(j);
1910       for (unsigned j = 0; j != InitElts; ++j)
1911         Args.push_back(j + Offset);
1912       Args.resize(ResElts, -1);
1913     }
1914 
1915     // If V is undef, make sure it ends up on the RHS of the shuffle to aid
1916     // merging subsequent shuffles into this one.
1917     if (CurIdx == 0)
1918       std::swap(V, Init);
1919     V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
1920     VIsUndefShuffle = isa<llvm::UndefValue>(Init);
1921     CurIdx += InitElts;
1922   }
1923 
1924   // FIXME: evaluate codegen vs. shuffling against constant null vector.
1925   // Emit remaining default initializers.
1926   llvm::Type *EltTy = VType->getElementType();
1927 
1928   // Emit remaining default initializers
1929   for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
1930     Value *Idx = Builder.getInt32(CurIdx);
1931     llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
1932     V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
1933   }
1934   return V;
1935 }
1936 
1937 bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
1938   const Expr *E = CE->getSubExpr();
1939 
1940   if (CE->getCastKind() == CK_UncheckedDerivedToBase)
1941     return false;
1942 
1943   if (isa<CXXThisExpr>(E->IgnoreParens())) {
1944     // We always assume that 'this' is never null.
1945     return false;
1946   }
1947 
1948   if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
1949     // And that glvalue casts are never null.
1950     if (ICE->getValueKind() != VK_RValue)
1951       return false;
1952   }
1953 
1954   return true;
1955 }
1956 
1957 // VisitCastExpr - Emit code for an explicit or implicit cast.  Implicit casts
1958 // have to handle a more broad range of conversions than explicit casts, as they
1959 // handle things like function to ptr-to-function decay etc.
1960 Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
1961   Expr *E = CE->getSubExpr();
1962   QualType DestTy = CE->getType();
1963   CastKind Kind = CE->getCastKind();
1964 
1965   // These cases are generally not written to ignore the result of
1966   // evaluating their sub-expressions, so we clear this now.
1967   bool Ignored = TestAndClearIgnoreResultAssign();
1968 
1969   // Since almost all cast kinds apply to scalars, this switch doesn't have
1970   // a default case, so the compiler will warn on a missing case.  The cases
1971   // are in the same order as in the CastKind enum.
1972   switch (Kind) {
1973   case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
1974   case CK_BuiltinFnToFnPtr:
1975     llvm_unreachable("builtin functions are handled elsewhere");
1976 
1977   case CK_LValueBitCast:
1978   case CK_ObjCObjectLValueCast: {
1979     Address Addr = EmitLValue(E).getAddress(CGF);
1980     Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
1981     LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
1982     return EmitLoadOfLValue(LV, CE->getExprLoc());
1983   }
1984 
1985   case CK_LValueToRValueBitCast: {
1986     LValue SourceLVal = CGF.EmitLValue(E);
1987     Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
1988                                                 CGF.ConvertTypeForMem(DestTy));
1989     LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
1990     DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
1991     return EmitLoadOfLValue(DestLV, CE->getExprLoc());
1992   }
1993 
1994   case CK_CPointerToObjCPointerCast:
1995   case CK_BlockPointerToObjCPointerCast:
1996   case CK_AnyPointerToBlockPointerCast:
1997   case CK_BitCast: {
1998     Value *Src = Visit(const_cast<Expr*>(E));
1999     llvm::Type *SrcTy = Src->getType();
2000     llvm::Type *DstTy = ConvertType(DestTy);
2001     if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
2002         SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
2003       llvm_unreachable("wrong cast for pointers in different address spaces"
2004                        "(must be an address space cast)!");
2005     }
2006 
2007     if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2008       if (auto PT = DestTy->getAs<PointerType>())
2009         CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
2010                                       /*MayBeNull=*/true,
2011                                       CodeGenFunction::CFITCK_UnrelatedCast,
2012                                       CE->getBeginLoc());
2013     }
2014 
2015     if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2016       const QualType SrcType = E->getType();
2017 
2018       if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2019         // Casting to pointer that could carry dynamic information (provided by
2020         // invariant.group) requires launder.
2021         Src = Builder.CreateLaunderInvariantGroup(Src);
2022       } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2023         // Casting to pointer that does not carry dynamic information (provided
2024         // by invariant.group) requires stripping it.  Note that we don't do it
2025         // if the source could not be dynamic type and destination could be
2026         // dynamic because dynamic information is already laundered.  It is
2027         // because launder(strip(src)) == launder(src), so there is no need to
2028         // add extra strip before launder.
2029         Src = Builder.CreateStripInvariantGroup(Src);
2030       }
2031     }
2032 
2033     // Update heapallocsite metadata when there is an explicit pointer cast.
2034     if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2035       if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) {
2036         QualType PointeeType = DestTy->getPointeeType();
2037         if (!PointeeType.isNull())
2038           CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2039                                                        CE->getExprLoc());
2040       }
2041     }
2042 
2043     // If Src is a fixed vector and Dst is a scalable vector, and both have the
2044     // same element type, use the llvm.experimental.vector.insert intrinsic to
2045     // perform the bitcast.
2046     if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2047       if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2048         if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
2049           llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
2050           llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2051           return Builder.CreateInsertVector(DstTy, UndefVec, Src, Zero,
2052                                             "castScalableSve");
2053         }
2054       }
2055     }
2056 
2057     // If Src is a scalable vector and Dst is a fixed vector, and both have the
2058     // same element type, use the llvm.experimental.vector.extract intrinsic to
2059     // perform the bitcast.
2060     if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2061       if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2062         if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
2063           llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2064           return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve");
2065         }
2066       }
2067     }
2068 
2069     // Perform VLAT <-> VLST bitcast through memory.
2070     // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
2071     //       require the element types of the vectors to be the same, we
2072     //       need to keep this around for casting between predicates, or more
2073     //       generally for bitcasts between VLAT <-> VLST where the element
2074     //       types of the vectors are not the same, until we figure out a better
2075     //       way of doing these casts.
2076     if ((isa<llvm::FixedVectorType>(SrcTy) &&
2077          isa<llvm::ScalableVectorType>(DstTy)) ||
2078         (isa<llvm::ScalableVectorType>(SrcTy) &&
2079          isa<llvm::FixedVectorType>(DstTy))) {
2080       if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
2081         // Call expressions can't have a scalar return unless the return type
2082         // is a reference type so an lvalue can't be emitted. Create a temp
2083         // alloca to store the call, bitcast the address then load.
2084         QualType RetTy = CE->getCallReturnType(CGF.getContext());
2085         Address Addr =
2086             CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-call-rvalue");
2087         LValue LV = CGF.MakeAddrLValue(Addr, RetTy);
2088         CGF.EmitStoreOfScalar(Src, LV);
2089         Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
2090                                             "castFixedSve");
2091         LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2092         DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2093         return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2094       }
2095 
2096       Address Addr = EmitLValue(E).getAddress(CGF);
2097       Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
2098       LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2099       DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2100       return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2101     }
2102 
2103     return Builder.CreateBitCast(Src, DstTy);
2104   }
2105   case CK_AddressSpaceConversion: {
2106     Expr::EvalResult Result;
2107     if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2108         Result.Val.isNullPointer()) {
2109       // If E has side effect, it is emitted even if its final result is a
2110       // null pointer. In that case, a DCE pass should be able to
2111       // eliminate the useless instructions emitted during translating E.
2112       if (Result.HasSideEffects)
2113         Visit(E);
2114       return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2115           ConvertType(DestTy)), DestTy);
2116     }
2117     // Since target may map different address spaces in AST to the same address
2118     // space, an address space conversion may end up as a bitcast.
2119     return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2120         CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2121         DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
2122   }
2123   case CK_AtomicToNonAtomic:
2124   case CK_NonAtomicToAtomic:
2125   case CK_NoOp:
2126   case CK_UserDefinedConversion:
2127     return Visit(const_cast<Expr*>(E));
2128 
2129   case CK_BaseToDerived: {
2130     const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2131     assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2132 
2133     Address Base = CGF.EmitPointerWithAlignment(E);
2134     Address Derived =
2135       CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2136                                    CE->path_begin(), CE->path_end(),
2137                                    CGF.ShouldNullCheckClassCastValue(CE));
2138 
2139     // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2140     // performed and the object is not of the derived type.
2141     if (CGF.sanitizePerformTypeCheck())
2142       CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
2143                         Derived.getPointer(), DestTy->getPointeeType());
2144 
2145     if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2146       CGF.EmitVTablePtrCheckForCast(
2147           DestTy->getPointeeType(), Derived.getPointer(),
2148           /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
2149           CE->getBeginLoc());
2150 
2151     return Derived.getPointer();
2152   }
2153   case CK_UncheckedDerivedToBase:
2154   case CK_DerivedToBase: {
2155     // The EmitPointerWithAlignment path does this fine; just discard
2156     // the alignment.
2157     return CGF.EmitPointerWithAlignment(CE).getPointer();
2158   }
2159 
2160   case CK_Dynamic: {
2161     Address V = CGF.EmitPointerWithAlignment(E);
2162     const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2163     return CGF.EmitDynamicCast(V, DCE);
2164   }
2165 
2166   case CK_ArrayToPointerDecay:
2167     return CGF.EmitArrayToPointerDecay(E).getPointer();
2168   case CK_FunctionToPointerDecay:
2169     return EmitLValue(E).getPointer(CGF);
2170 
2171   case CK_NullToPointer:
2172     if (MustVisitNullValue(E))
2173       CGF.EmitIgnoredExpr(E);
2174 
2175     return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2176                               DestTy);
2177 
2178   case CK_NullToMemberPointer: {
2179     if (MustVisitNullValue(E))
2180       CGF.EmitIgnoredExpr(E);
2181 
2182     const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2183     return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2184   }
2185 
2186   case CK_ReinterpretMemberPointer:
2187   case CK_BaseToDerivedMemberPointer:
2188   case CK_DerivedToBaseMemberPointer: {
2189     Value *Src = Visit(E);
2190 
2191     // Note that the AST doesn't distinguish between checked and
2192     // unchecked member pointer conversions, so we always have to
2193     // implement checked conversions here.  This is inefficient when
2194     // actual control flow may be required in order to perform the
2195     // check, which it is for data member pointers (but not member
2196     // function pointers on Itanium and ARM).
2197     return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2198   }
2199 
2200   case CK_ARCProduceObject:
2201     return CGF.EmitARCRetainScalarExpr(E);
2202   case CK_ARCConsumeObject:
2203     return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2204   case CK_ARCReclaimReturnedObject:
2205     return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2206   case CK_ARCExtendBlockObject:
2207     return CGF.EmitARCExtendBlockObject(E);
2208 
2209   case CK_CopyAndAutoreleaseBlockObject:
2210     return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2211 
2212   case CK_FloatingRealToComplex:
2213   case CK_FloatingComplexCast:
2214   case CK_IntegralRealToComplex:
2215   case CK_IntegralComplexCast:
2216   case CK_IntegralComplexToFloatingComplex:
2217   case CK_FloatingComplexToIntegralComplex:
2218   case CK_ConstructorConversion:
2219   case CK_ToUnion:
2220     llvm_unreachable("scalar cast to non-scalar value");
2221 
2222   case CK_LValueToRValue:
2223     assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2224     assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2225     return Visit(const_cast<Expr*>(E));
2226 
2227   case CK_IntegralToPointer: {
2228     Value *Src = Visit(const_cast<Expr*>(E));
2229 
2230     // First, convert to the correct width so that we control the kind of
2231     // extension.
2232     auto DestLLVMTy = ConvertType(DestTy);
2233     llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2234     bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2235     llvm::Value* IntResult =
2236       Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2237 
2238     auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2239 
2240     if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2241       // Going from integer to pointer that could be dynamic requires reloading
2242       // dynamic information from invariant.group.
2243       if (DestTy.mayBeDynamicClass())
2244         IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2245     }
2246     return IntToPtr;
2247   }
2248   case CK_PointerToIntegral: {
2249     assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2250     auto *PtrExpr = Visit(E);
2251 
2252     if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2253       const QualType SrcType = E->getType();
2254 
2255       // Casting to integer requires stripping dynamic information as it does
2256       // not carries it.
2257       if (SrcType.mayBeDynamicClass())
2258         PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2259     }
2260 
2261     return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2262   }
2263   case CK_ToVoid: {
2264     CGF.EmitIgnoredExpr(E);
2265     return nullptr;
2266   }
2267   case CK_MatrixCast: {
2268     return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2269                                 CE->getExprLoc());
2270   }
2271   case CK_VectorSplat: {
2272     llvm::Type *DstTy = ConvertType(DestTy);
2273     Value *Elt = Visit(const_cast<Expr*>(E));
2274     // Splat the element across to all elements
2275     unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
2276     return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2277   }
2278 
2279   case CK_FixedPointCast:
2280     return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2281                                 CE->getExprLoc());
2282 
2283   case CK_FixedPointToBoolean:
2284     assert(E->getType()->isFixedPointType() &&
2285            "Expected src type to be fixed point type");
2286     assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2287     return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2288                                 CE->getExprLoc());
2289 
2290   case CK_FixedPointToIntegral:
2291     assert(E->getType()->isFixedPointType() &&
2292            "Expected src type to be fixed point type");
2293     assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2294     return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2295                                 CE->getExprLoc());
2296 
2297   case CK_IntegralToFixedPoint:
2298     assert(E->getType()->isIntegerType() &&
2299            "Expected src type to be an integer");
2300     assert(DestTy->isFixedPointType() &&
2301            "Expected dest type to be fixed point type");
2302     return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2303                                 CE->getExprLoc());
2304 
2305   case CK_IntegralCast: {
2306     ScalarConversionOpts Opts;
2307     if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2308       if (!ICE->isPartOfExplicitCast())
2309         Opts = ScalarConversionOpts(CGF.SanOpts);
2310     }
2311     return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2312                                 CE->getExprLoc(), Opts);
2313   }
2314   case CK_IntegralToFloating:
2315   case CK_FloatingToIntegral:
2316   case CK_FloatingCast:
2317   case CK_FixedPointToFloating:
2318   case CK_FloatingToFixedPoint: {
2319     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2320     return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2321                                 CE->getExprLoc());
2322   }
2323   case CK_BooleanToSignedIntegral: {
2324     ScalarConversionOpts Opts;
2325     Opts.TreatBooleanAsSigned = true;
2326     return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2327                                 CE->getExprLoc(), Opts);
2328   }
2329   case CK_IntegralToBoolean:
2330     return EmitIntToBoolConversion(Visit(E));
2331   case CK_PointerToBoolean:
2332     return EmitPointerToBoolConversion(Visit(E), E->getType());
2333   case CK_FloatingToBoolean: {
2334     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2335     return EmitFloatToBoolConversion(Visit(E));
2336   }
2337   case CK_MemberPointerToBoolean: {
2338     llvm::Value *MemPtr = Visit(E);
2339     const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2340     return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2341   }
2342 
2343   case CK_FloatingComplexToReal:
2344   case CK_IntegralComplexToReal:
2345     return CGF.EmitComplexExpr(E, false, true).first;
2346 
2347   case CK_FloatingComplexToBoolean:
2348   case CK_IntegralComplexToBoolean: {
2349     CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2350 
2351     // TODO: kill this function off, inline appropriate case here
2352     return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2353                                          CE->getExprLoc());
2354   }
2355 
2356   case CK_ZeroToOCLOpaqueType: {
2357     assert((DestTy->isEventT() || DestTy->isQueueT() ||
2358             DestTy->isOCLIntelSubgroupAVCType()) &&
2359            "CK_ZeroToOCLEvent cast on non-event type");
2360     return llvm::Constant::getNullValue(ConvertType(DestTy));
2361   }
2362 
2363   case CK_IntToOCLSampler:
2364     return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2365 
2366   } // end of switch
2367 
2368   llvm_unreachable("unknown scalar cast");
2369 }
2370 
2371 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2372   CodeGenFunction::StmtExprEvaluation eval(CGF);
2373   Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2374                                            !E->getType()->isVoidType());
2375   if (!RetAlloca.isValid())
2376     return nullptr;
2377   return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2378                               E->getExprLoc());
2379 }
2380 
2381 Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2382   CodeGenFunction::RunCleanupsScope Scope(CGF);
2383   Value *V = Visit(E->getSubExpr());
2384   // Defend against dominance problems caused by jumps out of expression
2385   // evaluation through the shared cleanup block.
2386   Scope.ForceCleanup({&V});
2387   return V;
2388 }
2389 
2390 //===----------------------------------------------------------------------===//
2391 //                             Unary Operators
2392 //===----------------------------------------------------------------------===//
2393 
2394 static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2395                                            llvm::Value *InVal, bool IsInc,
2396                                            FPOptions FPFeatures) {
2397   BinOpInfo BinOp;
2398   BinOp.LHS = InVal;
2399   BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2400   BinOp.Ty = E->getType();
2401   BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2402   BinOp.FPFeatures = FPFeatures;
2403   BinOp.E = E;
2404   return BinOp;
2405 }
2406 
2407 llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2408     const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2409   llvm::Value *Amount =
2410       llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2411   StringRef Name = IsInc ? "inc" : "dec";
2412   switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2413   case LangOptions::SOB_Defined:
2414     return Builder.CreateAdd(InVal, Amount, Name);
2415   case LangOptions::SOB_Undefined:
2416     if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2417       return Builder.CreateNSWAdd(InVal, Amount, Name);
2418     LLVM_FALLTHROUGH;
2419   case LangOptions::SOB_Trapping:
2420     if (!E->canOverflow())
2421       return Builder.CreateNSWAdd(InVal, Amount, Name);
2422     return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2423         E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2424   }
2425   llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2426 }
2427 
2428 namespace {
2429 /// Handles check and update for lastprivate conditional variables.
2430 class OMPLastprivateConditionalUpdateRAII {
2431 private:
2432   CodeGenFunction &CGF;
2433   const UnaryOperator *E;
2434 
2435 public:
2436   OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
2437                                       const UnaryOperator *E)
2438       : CGF(CGF), E(E) {}
2439   ~OMPLastprivateConditionalUpdateRAII() {
2440     if (CGF.getLangOpts().OpenMP)
2441       CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
2442           CGF, E->getSubExpr());
2443   }
2444 };
2445 } // namespace
2446 
2447 llvm::Value *
2448 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2449                                            bool isInc, bool isPre) {
2450   OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
2451   QualType type = E->getSubExpr()->getType();
2452   llvm::PHINode *atomicPHI = nullptr;
2453   llvm::Value *value;
2454   llvm::Value *input;
2455 
2456   int amount = (isInc ? 1 : -1);
2457   bool isSubtraction = !isInc;
2458 
2459   if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2460     type = atomicTy->getValueType();
2461     if (isInc && type->isBooleanType()) {
2462       llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
2463       if (isPre) {
2464         Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
2465             ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
2466         return Builder.getTrue();
2467       }
2468       // For atomic bool increment, we just store true and return it for
2469       // preincrement, do an atomic swap with true for postincrement
2470       return Builder.CreateAtomicRMW(
2471           llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True,
2472           llvm::AtomicOrdering::SequentiallyConsistent);
2473     }
2474     // Special case for atomic increment / decrement on integers, emit
2475     // atomicrmw instructions.  We skip this if we want to be doing overflow
2476     // checking, and fall into the slow path with the atomic cmpxchg loop.
2477     if (!type->isBooleanType() && type->isIntegerType() &&
2478         !(type->isUnsignedIntegerType() &&
2479           CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2480         CGF.getLangOpts().getSignedOverflowBehavior() !=
2481             LangOptions::SOB_Trapping) {
2482       llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2483         llvm::AtomicRMWInst::Sub;
2484       llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2485         llvm::Instruction::Sub;
2486       llvm::Value *amt = CGF.EmitToMemory(
2487           llvm::ConstantInt::get(ConvertType(type), 1, true), type);
2488       llvm::Value *old =
2489           Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt,
2490                                   llvm::AtomicOrdering::SequentiallyConsistent);
2491       return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2492     }
2493     value = EmitLoadOfLValue(LV, E->getExprLoc());
2494     input = value;
2495     // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2496     llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2497     llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2498     value = CGF.EmitToMemory(value, type);
2499     Builder.CreateBr(opBB);
2500     Builder.SetInsertPoint(opBB);
2501     atomicPHI = Builder.CreatePHI(value->getType(), 2);
2502     atomicPHI->addIncoming(value, startBB);
2503     value = atomicPHI;
2504   } else {
2505     value = EmitLoadOfLValue(LV, E->getExprLoc());
2506     input = value;
2507   }
2508 
2509   // Special case of integer increment that we have to check first: bool++.
2510   // Due to promotion rules, we get:
2511   //   bool++ -> bool = bool + 1
2512   //          -> bool = (int)bool + 1
2513   //          -> bool = ((int)bool + 1 != 0)
2514   // An interesting aspect of this is that increment is always true.
2515   // Decrement does not have this property.
2516   if (isInc && type->isBooleanType()) {
2517     value = Builder.getTrue();
2518 
2519   // Most common case by far: integer increment.
2520   } else if (type->isIntegerType()) {
2521     QualType promotedType;
2522     bool canPerformLossyDemotionCheck = false;
2523     if (type->isPromotableIntegerType()) {
2524       promotedType = CGF.getContext().getPromotedIntegerType(type);
2525       assert(promotedType != type && "Shouldn't promote to the same type.");
2526       canPerformLossyDemotionCheck = true;
2527       canPerformLossyDemotionCheck &=
2528           CGF.getContext().getCanonicalType(type) !=
2529           CGF.getContext().getCanonicalType(promotedType);
2530       canPerformLossyDemotionCheck &=
2531           PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
2532               type, promotedType);
2533       assert((!canPerformLossyDemotionCheck ||
2534               type->isSignedIntegerOrEnumerationType() ||
2535               promotedType->isSignedIntegerOrEnumerationType() ||
2536               ConvertType(type)->getScalarSizeInBits() ==
2537                   ConvertType(promotedType)->getScalarSizeInBits()) &&
2538              "The following check expects that if we do promotion to different "
2539              "underlying canonical type, at least one of the types (either "
2540              "base or promoted) will be signed, or the bitwidths will match.");
2541     }
2542     if (CGF.SanOpts.hasOneOf(
2543             SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
2544         canPerformLossyDemotionCheck) {
2545       // While `x += 1` (for `x` with width less than int) is modeled as
2546       // promotion+arithmetics+demotion, and we can catch lossy demotion with
2547       // ease; inc/dec with width less than int can't overflow because of
2548       // promotion rules, so we omit promotion+demotion, which means that we can
2549       // not catch lossy "demotion". Because we still want to catch these cases
2550       // when the sanitizer is enabled, we perform the promotion, then perform
2551       // the increment/decrement in the wider type, and finally
2552       // perform the demotion. This will catch lossy demotions.
2553 
2554       value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
2555       Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2556       value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2557       // Do pass non-default ScalarConversionOpts so that sanitizer check is
2558       // emitted.
2559       value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
2560                                    ScalarConversionOpts(CGF.SanOpts));
2561 
2562       // Note that signed integer inc/dec with width less than int can't
2563       // overflow because of promotion rules; we're just eliding a few steps
2564       // here.
2565     } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2566       value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2567     } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2568                CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2569       value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2570           E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2571     } else {
2572       llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2573       value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2574     }
2575 
2576   // Next most common: pointer increment.
2577   } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2578     QualType type = ptr->getPointeeType();
2579 
2580     // VLA types don't have constant size.
2581     if (const VariableArrayType *vla
2582           = CGF.getContext().getAsVariableArrayType(type)) {
2583       llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2584       if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
2585       if (CGF.getLangOpts().isSignedOverflowDefined())
2586         value = Builder.CreateGEP(value, numElts, "vla.inc");
2587       else
2588         value = CGF.EmitCheckedInBoundsGEP(
2589             value, numElts, /*SignedIndices=*/false, isSubtraction,
2590             E->getExprLoc(), "vla.inc");
2591 
2592     // Arithmetic on function pointers (!) is just +-1.
2593     } else if (type->isFunctionType()) {
2594       llvm::Value *amt = Builder.getInt32(amount);
2595 
2596       value = CGF.EmitCastToVoidPtr(value);
2597       if (CGF.getLangOpts().isSignedOverflowDefined())
2598         value = Builder.CreateGEP(value, amt, "incdec.funcptr");
2599       else
2600         value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2601                                            isSubtraction, E->getExprLoc(),
2602                                            "incdec.funcptr");
2603       value = Builder.CreateBitCast(value, input->getType());
2604 
2605     // For everything else, we can just do a simple increment.
2606     } else {
2607       llvm::Value *amt = Builder.getInt32(amount);
2608       if (CGF.getLangOpts().isSignedOverflowDefined())
2609         value = Builder.CreateGEP(value, amt, "incdec.ptr");
2610       else
2611         value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2612                                            isSubtraction, E->getExprLoc(),
2613                                            "incdec.ptr");
2614     }
2615 
2616   // Vector increment/decrement.
2617   } else if (type->isVectorType()) {
2618     if (type->hasIntegerRepresentation()) {
2619       llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
2620 
2621       value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2622     } else {
2623       value = Builder.CreateFAdd(
2624                   value,
2625                   llvm::ConstantFP::get(value->getType(), amount),
2626                   isInc ? "inc" : "dec");
2627     }
2628 
2629   // Floating point.
2630   } else if (type->isRealFloatingType()) {
2631     // Add the inc/dec to the real part.
2632     llvm::Value *amt;
2633     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
2634 
2635     if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2636       // Another special case: half FP increment should be done via float
2637       if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2638         value = Builder.CreateCall(
2639             CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2640                                  CGF.CGM.FloatTy),
2641             input, "incdec.conv");
2642       } else {
2643         value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
2644       }
2645     }
2646 
2647     if (value->getType()->isFloatTy())
2648       amt = llvm::ConstantFP::get(VMContext,
2649                                   llvm::APFloat(static_cast<float>(amount)));
2650     else if (value->getType()->isDoubleTy())
2651       amt = llvm::ConstantFP::get(VMContext,
2652                                   llvm::APFloat(static_cast<double>(amount)));
2653     else {
2654       // Remaining types are Half, LongDouble or __float128. Convert from float.
2655       llvm::APFloat F(static_cast<float>(amount));
2656       bool ignored;
2657       const llvm::fltSemantics *FS;
2658       // Don't use getFloatTypeSemantics because Half isn't
2659       // necessarily represented using the "half" LLVM type.
2660       if (value->getType()->isFP128Ty())
2661         FS = &CGF.getTarget().getFloat128Format();
2662       else if (value->getType()->isHalfTy())
2663         FS = &CGF.getTarget().getHalfFormat();
2664       else
2665         FS = &CGF.getTarget().getLongDoubleFormat();
2666       F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
2667       amt = llvm::ConstantFP::get(VMContext, F);
2668     }
2669     value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
2670 
2671     if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2672       if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2673         value = Builder.CreateCall(
2674             CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
2675                                  CGF.CGM.FloatTy),
2676             value, "incdec.conv");
2677       } else {
2678         value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
2679       }
2680     }
2681 
2682   // Fixed-point types.
2683   } else if (type->isFixedPointType()) {
2684     // Fixed-point types are tricky. In some cases, it isn't possible to
2685     // represent a 1 or a -1 in the type at all. Piggyback off of
2686     // EmitFixedPointBinOp to avoid having to reimplement saturation.
2687     BinOpInfo Info;
2688     Info.E = E;
2689     Info.Ty = E->getType();
2690     Info.Opcode = isInc ? BO_Add : BO_Sub;
2691     Info.LHS = value;
2692     Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
2693     // If the type is signed, it's better to represent this as +(-1) or -(-1),
2694     // since -1 is guaranteed to be representable.
2695     if (type->isSignedFixedPointType()) {
2696       Info.Opcode = isInc ? BO_Sub : BO_Add;
2697       Info.RHS = Builder.CreateNeg(Info.RHS);
2698     }
2699     // Now, convert from our invented integer literal to the type of the unary
2700     // op. This will upscale and saturate if necessary. This value can become
2701     // undef in some cases.
2702     llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
2703     auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
2704     Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
2705     value = EmitFixedPointBinOp(Info);
2706 
2707   // Objective-C pointer types.
2708   } else {
2709     const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
2710     value = CGF.EmitCastToVoidPtr(value);
2711 
2712     CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
2713     if (!isInc) size = -size;
2714     llvm::Value *sizeValue =
2715       llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
2716 
2717     if (CGF.getLangOpts().isSignedOverflowDefined())
2718       value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
2719     else
2720       value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
2721                                          /*SignedIndices=*/false, isSubtraction,
2722                                          E->getExprLoc(), "incdec.objptr");
2723     value = Builder.CreateBitCast(value, input->getType());
2724   }
2725 
2726   if (atomicPHI) {
2727     llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
2728     llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2729     auto Pair = CGF.EmitAtomicCompareExchange(
2730         LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
2731     llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
2732     llvm::Value *success = Pair.second;
2733     atomicPHI->addIncoming(old, curBlock);
2734     Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
2735     Builder.SetInsertPoint(contBB);
2736     return isPre ? value : input;
2737   }
2738 
2739   // Store the updated result through the lvalue.
2740   if (LV.isBitField())
2741     CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
2742   else
2743     CGF.EmitStoreThroughLValue(RValue::get(value), LV);
2744 
2745   // If this is a postinc, return the value read from memory, otherwise use the
2746   // updated value.
2747   return isPre ? value : input;
2748 }
2749 
2750 
2751 
2752 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
2753   TestAndClearIgnoreResultAssign();
2754   Value *Op = Visit(E->getSubExpr());
2755 
2756   // Generate a unary FNeg for FP ops.
2757   if (Op->getType()->isFPOrFPVectorTy())
2758     return Builder.CreateFNeg(Op, "fneg");
2759 
2760   // Emit unary minus with EmitSub so we handle overflow cases etc.
2761   BinOpInfo BinOp;
2762   BinOp.RHS = Op;
2763   BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
2764   BinOp.Ty = E->getType();
2765   BinOp.Opcode = BO_Sub;
2766   BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
2767   BinOp.E = E;
2768   return EmitSub(BinOp);
2769 }
2770 
2771 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
2772   TestAndClearIgnoreResultAssign();
2773   Value *Op = Visit(E->getSubExpr());
2774   return Builder.CreateNot(Op, "neg");
2775 }
2776 
2777 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
2778   // Perform vector logical not on comparison with zero vector.
2779   if (E->getType()->isVectorType() &&
2780       E->getType()->castAs<VectorType>()->getVectorKind() ==
2781           VectorType::GenericVector) {
2782     Value *Oper = Visit(E->getSubExpr());
2783     Value *Zero = llvm::Constant::getNullValue(Oper->getType());
2784     Value *Result;
2785     if (Oper->getType()->isFPOrFPVectorTy()) {
2786       CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
2787           CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
2788       Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
2789     } else
2790       Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
2791     return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
2792   }
2793 
2794   // Compare operand to zero.
2795   Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
2796 
2797   // Invert value.
2798   // TODO: Could dynamically modify easy computations here.  For example, if
2799   // the operand is an icmp ne, turn into icmp eq.
2800   BoolVal = Builder.CreateNot(BoolVal, "lnot");
2801 
2802   // ZExt result to the expr type.
2803   return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
2804 }
2805 
2806 Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
2807   // Try folding the offsetof to a constant.
2808   Expr::EvalResult EVResult;
2809   if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
2810     llvm::APSInt Value = EVResult.Val.getInt();
2811     return Builder.getInt(Value);
2812   }
2813 
2814   // Loop over the components of the offsetof to compute the value.
2815   unsigned n = E->getNumComponents();
2816   llvm::Type* ResultType = ConvertType(E->getType());
2817   llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
2818   QualType CurrentType = E->getTypeSourceInfo()->getType();
2819   for (unsigned i = 0; i != n; ++i) {
2820     OffsetOfNode ON = E->getComponent(i);
2821     llvm::Value *Offset = nullptr;
2822     switch (ON.getKind()) {
2823     case OffsetOfNode::Array: {
2824       // Compute the index
2825       Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
2826       llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
2827       bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
2828       Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
2829 
2830       // Save the element type
2831       CurrentType =
2832           CGF.getContext().getAsArrayType(CurrentType)->getElementType();
2833 
2834       // Compute the element size
2835       llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
2836           CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
2837 
2838       // Multiply out to compute the result
2839       Offset = Builder.CreateMul(Idx, ElemSize);
2840       break;
2841     }
2842 
2843     case OffsetOfNode::Field: {
2844       FieldDecl *MemberDecl = ON.getField();
2845       RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2846       const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2847 
2848       // Compute the index of the field in its parent.
2849       unsigned i = 0;
2850       // FIXME: It would be nice if we didn't have to loop here!
2851       for (RecordDecl::field_iterator Field = RD->field_begin(),
2852                                       FieldEnd = RD->field_end();
2853            Field != FieldEnd; ++Field, ++i) {
2854         if (*Field == MemberDecl)
2855           break;
2856       }
2857       assert(i < RL.getFieldCount() && "offsetof field in wrong type");
2858 
2859       // Compute the offset to the field
2860       int64_t OffsetInt = RL.getFieldOffset(i) /
2861                           CGF.getContext().getCharWidth();
2862       Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
2863 
2864       // Save the element type.
2865       CurrentType = MemberDecl->getType();
2866       break;
2867     }
2868 
2869     case OffsetOfNode::Identifier:
2870       llvm_unreachable("dependent __builtin_offsetof");
2871 
2872     case OffsetOfNode::Base: {
2873       if (ON.getBase()->isVirtual()) {
2874         CGF.ErrorUnsupported(E, "virtual base in offsetof");
2875         continue;
2876       }
2877 
2878       RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2879       const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2880 
2881       // Save the element type.
2882       CurrentType = ON.getBase()->getType();
2883 
2884       // Compute the offset to the base.
2885       const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2886       CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
2887       CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
2888       Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
2889       break;
2890     }
2891     }
2892     Result = Builder.CreateAdd(Result, Offset);
2893   }
2894   return Result;
2895 }
2896 
2897 /// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
2898 /// argument of the sizeof expression as an integer.
2899 Value *
2900 ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2901                               const UnaryExprOrTypeTraitExpr *E) {
2902   QualType TypeToSize = E->getTypeOfArgument();
2903   if (E->getKind() == UETT_SizeOf) {
2904     if (const VariableArrayType *VAT =
2905           CGF.getContext().getAsVariableArrayType(TypeToSize)) {
2906       if (E->isArgumentType()) {
2907         // sizeof(type) - make sure to emit the VLA size.
2908         CGF.EmitVariablyModifiedType(TypeToSize);
2909       } else {
2910         // C99 6.5.3.4p2: If the argument is an expression of type
2911         // VLA, it is evaluated.
2912         CGF.EmitIgnoredExpr(E->getArgumentExpr());
2913       }
2914 
2915       auto VlaSize = CGF.getVLASize(VAT);
2916       llvm::Value *size = VlaSize.NumElts;
2917 
2918       // Scale the number of non-VLA elements by the non-VLA element size.
2919       CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
2920       if (!eltSize.isOne())
2921         size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
2922 
2923       return size;
2924     }
2925   } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
2926     auto Alignment =
2927         CGF.getContext()
2928             .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2929                 E->getTypeOfArgument()->getPointeeType()))
2930             .getQuantity();
2931     return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
2932   }
2933 
2934   // If this isn't sizeof(vla), the result must be constant; use the constant
2935   // folding logic so we don't have to duplicate it here.
2936   return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
2937 }
2938 
2939 Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
2940   Expr *Op = E->getSubExpr();
2941   if (Op->getType()->isAnyComplexType()) {
2942     // If it's an l-value, load through the appropriate subobject l-value.
2943     // Note that we have to ask E because Op might be an l-value that
2944     // this won't work for, e.g. an Obj-C property.
2945     if (E->isGLValue())
2946       return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2947                                   E->getExprLoc()).getScalarVal();
2948 
2949     // Otherwise, calculate and project.
2950     return CGF.EmitComplexExpr(Op, false, true).first;
2951   }
2952 
2953   return Visit(Op);
2954 }
2955 
2956 Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
2957   Expr *Op = E->getSubExpr();
2958   if (Op->getType()->isAnyComplexType()) {
2959     // If it's an l-value, load through the appropriate subobject l-value.
2960     // Note that we have to ask E because Op might be an l-value that
2961     // this won't work for, e.g. an Obj-C property.
2962     if (Op->isGLValue())
2963       return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2964                                   E->getExprLoc()).getScalarVal();
2965 
2966     // Otherwise, calculate and project.
2967     return CGF.EmitComplexExpr(Op, true, false).second;
2968   }
2969 
2970   // __imag on a scalar returns zero.  Emit the subexpr to ensure side
2971   // effects are evaluated, but not the actual value.
2972   if (Op->isGLValue())
2973     CGF.EmitLValue(Op);
2974   else
2975     CGF.EmitScalarExpr(Op, true);
2976   return llvm::Constant::getNullValue(ConvertType(E->getType()));
2977 }
2978 
2979 //===----------------------------------------------------------------------===//
2980 //                           Binary Operators
2981 //===----------------------------------------------------------------------===//
2982 
2983 BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
2984   TestAndClearIgnoreResultAssign();
2985   BinOpInfo Result;
2986   Result.LHS = Visit(E->getLHS());
2987   Result.RHS = Visit(E->getRHS());
2988   Result.Ty  = E->getType();
2989   Result.Opcode = E->getOpcode();
2990   Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
2991   Result.E = E;
2992   return Result;
2993 }
2994 
2995 LValue ScalarExprEmitter::EmitCompoundAssignLValue(
2996                                               const CompoundAssignOperator *E,
2997                         Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
2998                                                    Value *&Result) {
2999   QualType LHSTy = E->getLHS()->getType();
3000   BinOpInfo OpInfo;
3001 
3002   if (E->getComputationResultType()->isAnyComplexType())
3003     return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
3004 
3005   // Emit the RHS first.  __block variables need to have the rhs evaluated
3006   // first, plus this should improve codegen a little.
3007   OpInfo.RHS = Visit(E->getRHS());
3008   OpInfo.Ty = E->getComputationResultType();
3009   OpInfo.Opcode = E->getOpcode();
3010   OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3011   OpInfo.E = E;
3012   // Load/convert the LHS.
3013   LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3014 
3015   llvm::PHINode *atomicPHI = nullptr;
3016   if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3017     QualType type = atomicTy->getValueType();
3018     if (!type->isBooleanType() && type->isIntegerType() &&
3019         !(type->isUnsignedIntegerType() &&
3020           CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3021         CGF.getLangOpts().getSignedOverflowBehavior() !=
3022             LangOptions::SOB_Trapping) {
3023       llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3024       llvm::Instruction::BinaryOps Op;
3025       switch (OpInfo.Opcode) {
3026         // We don't have atomicrmw operands for *, %, /, <<, >>
3027         case BO_MulAssign: case BO_DivAssign:
3028         case BO_RemAssign:
3029         case BO_ShlAssign:
3030         case BO_ShrAssign:
3031           break;
3032         case BO_AddAssign:
3033           AtomicOp = llvm::AtomicRMWInst::Add;
3034           Op = llvm::Instruction::Add;
3035           break;
3036         case BO_SubAssign:
3037           AtomicOp = llvm::AtomicRMWInst::Sub;
3038           Op = llvm::Instruction::Sub;
3039           break;
3040         case BO_AndAssign:
3041           AtomicOp = llvm::AtomicRMWInst::And;
3042           Op = llvm::Instruction::And;
3043           break;
3044         case BO_XorAssign:
3045           AtomicOp = llvm::AtomicRMWInst::Xor;
3046           Op = llvm::Instruction::Xor;
3047           break;
3048         case BO_OrAssign:
3049           AtomicOp = llvm::AtomicRMWInst::Or;
3050           Op = llvm::Instruction::Or;
3051           break;
3052         default:
3053           llvm_unreachable("Invalid compound assignment type");
3054       }
3055       if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3056         llvm::Value *Amt = CGF.EmitToMemory(
3057             EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
3058                                  E->getExprLoc()),
3059             LHSTy);
3060         Value *OldVal = Builder.CreateAtomicRMW(
3061             AtomicOp, LHSLV.getPointer(CGF), Amt,
3062             llvm::AtomicOrdering::SequentiallyConsistent);
3063 
3064         // Since operation is atomic, the result type is guaranteed to be the
3065         // same as the input in LLVM terms.
3066         Result = Builder.CreateBinOp(Op, OldVal, Amt);
3067         return LHSLV;
3068       }
3069     }
3070     // FIXME: For floating point types, we should be saving and restoring the
3071     // floating point environment in the loop.
3072     llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3073     llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3074     OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3075     OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
3076     Builder.CreateBr(opBB);
3077     Builder.SetInsertPoint(opBB);
3078     atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
3079     atomicPHI->addIncoming(OpInfo.LHS, startBB);
3080     OpInfo.LHS = atomicPHI;
3081   }
3082   else
3083     OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3084 
3085   CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
3086   SourceLocation Loc = E->getExprLoc();
3087   OpInfo.LHS =
3088       EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
3089 
3090   // Expand the binary operator.
3091   Result = (this->*Func)(OpInfo);
3092 
3093   // Convert the result back to the LHS type,
3094   // potentially with Implicit Conversion sanitizer check.
3095   Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
3096                                 Loc, ScalarConversionOpts(CGF.SanOpts));
3097 
3098   if (atomicPHI) {
3099     llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3100     llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3101     auto Pair = CGF.EmitAtomicCompareExchange(
3102         LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
3103     llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
3104     llvm::Value *success = Pair.second;
3105     atomicPHI->addIncoming(old, curBlock);
3106     Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3107     Builder.SetInsertPoint(contBB);
3108     return LHSLV;
3109   }
3110 
3111   // Store the result value into the LHS lvalue. Bit-fields are handled
3112   // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
3113   // 'An assignment expression has the value of the left operand after the
3114   // assignment...'.
3115   if (LHSLV.isBitField())
3116     CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
3117   else
3118     CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
3119 
3120   if (CGF.getLangOpts().OpenMP)
3121     CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
3122                                                                   E->getLHS());
3123   return LHSLV;
3124 }
3125 
3126 Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3127                       Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3128   bool Ignore = TestAndClearIgnoreResultAssign();
3129   Value *RHS = nullptr;
3130   LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
3131 
3132   // If the result is clearly ignored, return now.
3133   if (Ignore)
3134     return nullptr;
3135 
3136   // The result of an assignment in C is the assigned r-value.
3137   if (!CGF.getLangOpts().CPlusPlus)
3138     return RHS;
3139 
3140   // If the lvalue is non-volatile, return the computed value of the assignment.
3141   if (!LHS.isVolatileQualified())
3142     return RHS;
3143 
3144   // Otherwise, reload the value.
3145   return EmitLoadOfLValue(LHS, E->getExprLoc());
3146 }
3147 
3148 void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3149     const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3150   SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
3151 
3152   if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
3153     Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
3154                                     SanitizerKind::IntegerDivideByZero));
3155   }
3156 
3157   const auto *BO = cast<BinaryOperator>(Ops.E);
3158   if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
3159       Ops.Ty->hasSignedIntegerRepresentation() &&
3160       !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
3161       Ops.mayHaveIntegerOverflow()) {
3162     llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
3163 
3164     llvm::Value *IntMin =
3165       Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
3166     llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
3167 
3168     llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
3169     llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
3170     llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
3171     Checks.push_back(
3172         std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
3173   }
3174 
3175   if (Checks.size() > 0)
3176     EmitBinOpCheck(Checks, Ops);
3177 }
3178 
3179 Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
3180   {
3181     CodeGenFunction::SanitizerScope SanScope(&CGF);
3182     if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3183          CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3184         Ops.Ty->isIntegerType() &&
3185         (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3186       llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3187       EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
3188     } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
3189                Ops.Ty->isRealFloatingType() &&
3190                Ops.mayHaveFloatDivisionByZero()) {
3191       llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3192       llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
3193       EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
3194                      Ops);
3195     }
3196   }
3197 
3198   if (Ops.Ty->isConstantMatrixType()) {
3199     llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
3200     // We need to check the types of the operands of the operator to get the
3201     // correct matrix dimensions.
3202     auto *BO = cast<BinaryOperator>(Ops.E);
3203     (void)BO;
3204     assert(
3205         isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
3206         "first operand must be a matrix");
3207     assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
3208            "second operand must be an arithmetic type");
3209     return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
3210                               Ops.Ty->hasUnsignedIntegerRepresentation());
3211   }
3212 
3213   if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3214     llvm::Value *Val;
3215     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3216     Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
3217     if (CGF.getLangOpts().OpenCL &&
3218         !CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) {
3219       // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
3220       // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
3221       // build option allows an application to specify that single precision
3222       // floating-point divide (x/y and 1/x) and sqrt used in the program
3223       // source are correctly rounded.
3224       llvm::Type *ValTy = Val->getType();
3225       if (ValTy->isFloatTy() ||
3226           (isa<llvm::VectorType>(ValTy) &&
3227            cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
3228         CGF.SetFPAccuracy(Val, 2.5);
3229     }
3230     return Val;
3231   }
3232   else if (Ops.isFixedPointOp())
3233     return EmitFixedPointBinOp(Ops);
3234   else if (Ops.Ty->hasUnsignedIntegerRepresentation())
3235     return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
3236   else
3237     return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
3238 }
3239 
3240 Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3241   // Rem in C can't be a floating point type: C99 6.5.5p2.
3242   if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3243        CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3244       Ops.Ty->isIntegerType() &&
3245       (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3246     CodeGenFunction::SanitizerScope SanScope(&CGF);
3247     llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3248     EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
3249   }
3250 
3251   if (Ops.Ty->hasUnsignedIntegerRepresentation())
3252     return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
3253   else
3254     return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
3255 }
3256 
3257 Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
3258   unsigned IID;
3259   unsigned OpID = 0;
3260   SanitizerHandler OverflowKind;
3261 
3262   bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
3263   switch (Ops.Opcode) {
3264   case BO_Add:
3265   case BO_AddAssign:
3266     OpID = 1;
3267     IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
3268                      llvm::Intrinsic::uadd_with_overflow;
3269     OverflowKind = SanitizerHandler::AddOverflow;
3270     break;
3271   case BO_Sub:
3272   case BO_SubAssign:
3273     OpID = 2;
3274     IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
3275                      llvm::Intrinsic::usub_with_overflow;
3276     OverflowKind = SanitizerHandler::SubOverflow;
3277     break;
3278   case BO_Mul:
3279   case BO_MulAssign:
3280     OpID = 3;
3281     IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
3282                      llvm::Intrinsic::umul_with_overflow;
3283     OverflowKind = SanitizerHandler::MulOverflow;
3284     break;
3285   default:
3286     llvm_unreachable("Unsupported operation for overflow detection");
3287   }
3288   OpID <<= 1;
3289   if (isSigned)
3290     OpID |= 1;
3291 
3292   CodeGenFunction::SanitizerScope SanScope(&CGF);
3293   llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
3294 
3295   llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
3296 
3297   Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
3298   Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
3299   Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
3300 
3301   // Handle overflow with llvm.trap if no custom handler has been specified.
3302   const std::string *handlerName =
3303     &CGF.getLangOpts().OverflowHandler;
3304   if (handlerName->empty()) {
3305     // If the signed-integer-overflow sanitizer is enabled, emit a call to its
3306     // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
3307     if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
3308       llvm::Value *NotOverflow = Builder.CreateNot(overflow);
3309       SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
3310                               : SanitizerKind::UnsignedIntegerOverflow;
3311       EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
3312     } else
3313       CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
3314     return result;
3315   }
3316 
3317   // Branch in case of overflow.
3318   llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
3319   llvm::BasicBlock *continueBB =
3320       CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
3321   llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
3322 
3323   Builder.CreateCondBr(overflow, overflowBB, continueBB);
3324 
3325   // If an overflow handler is set, then we want to call it and then use its
3326   // result, if it returns.
3327   Builder.SetInsertPoint(overflowBB);
3328 
3329   // Get the overflow handler.
3330   llvm::Type *Int8Ty = CGF.Int8Ty;
3331   llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
3332   llvm::FunctionType *handlerTy =
3333       llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
3334   llvm::FunctionCallee handler =
3335       CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
3336 
3337   // Sign extend the args to 64-bit, so that we can use the same handler for
3338   // all types of overflow.
3339   llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
3340   llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
3341 
3342   // Call the handler with the two arguments, the operation, and the size of
3343   // the result.
3344   llvm::Value *handlerArgs[] = {
3345     lhs,
3346     rhs,
3347     Builder.getInt8(OpID),
3348     Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
3349   };
3350   llvm::Value *handlerResult =
3351     CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
3352 
3353   // Truncate the result back to the desired size.
3354   handlerResult = Builder.CreateTrunc(handlerResult, opTy);
3355   Builder.CreateBr(continueBB);
3356 
3357   Builder.SetInsertPoint(continueBB);
3358   llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
3359   phi->addIncoming(result, initialBB);
3360   phi->addIncoming(handlerResult, overflowBB);
3361 
3362   return phi;
3363 }
3364 
3365 /// Emit pointer + index arithmetic.
3366 static Value *emitPointerArithmetic(CodeGenFunction &CGF,
3367                                     const BinOpInfo &op,
3368                                     bool isSubtraction) {
3369   // Must have binary (not unary) expr here.  Unary pointer
3370   // increment/decrement doesn't use this path.
3371   const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3372 
3373   Value *pointer = op.LHS;
3374   Expr *pointerOperand = expr->getLHS();
3375   Value *index = op.RHS;
3376   Expr *indexOperand = expr->getRHS();
3377 
3378   // In a subtraction, the LHS is always the pointer.
3379   if (!isSubtraction && !pointer->getType()->isPointerTy()) {
3380     std::swap(pointer, index);
3381     std::swap(pointerOperand, indexOperand);
3382   }
3383 
3384   bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
3385 
3386   unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
3387   auto &DL = CGF.CGM.getDataLayout();
3388   auto PtrTy = cast<llvm::PointerType>(pointer->getType());
3389 
3390   // Some versions of glibc and gcc use idioms (particularly in their malloc
3391   // routines) that add a pointer-sized integer (known to be a pointer value)
3392   // to a null pointer in order to cast the value back to an integer or as
3393   // part of a pointer alignment algorithm.  This is undefined behavior, but
3394   // we'd like to be able to compile programs that use it.
3395   //
3396   // Normally, we'd generate a GEP with a null-pointer base here in response
3397   // to that code, but it's also UB to dereference a pointer created that
3398   // way.  Instead (as an acknowledged hack to tolerate the idiom) we will
3399   // generate a direct cast of the integer value to a pointer.
3400   //
3401   // The idiom (p = nullptr + N) is not met if any of the following are true:
3402   //
3403   //   The operation is subtraction.
3404   //   The index is not pointer-sized.
3405   //   The pointer type is not byte-sized.
3406   //
3407   if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
3408                                                        op.Opcode,
3409                                                        expr->getLHS(),
3410                                                        expr->getRHS()))
3411     return CGF.Builder.CreateIntToPtr(index, pointer->getType());
3412 
3413   if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
3414     // Zero-extend or sign-extend the pointer value according to
3415     // whether the index is signed or not.
3416     index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
3417                                       "idx.ext");
3418   }
3419 
3420   // If this is subtraction, negate the index.
3421   if (isSubtraction)
3422     index = CGF.Builder.CreateNeg(index, "idx.neg");
3423 
3424   if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
3425     CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
3426                         /*Accessed*/ false);
3427 
3428   const PointerType *pointerType
3429     = pointerOperand->getType()->getAs<PointerType>();
3430   if (!pointerType) {
3431     QualType objectType = pointerOperand->getType()
3432                                         ->castAs<ObjCObjectPointerType>()
3433                                         ->getPointeeType();
3434     llvm::Value *objectSize
3435       = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
3436 
3437     index = CGF.Builder.CreateMul(index, objectSize);
3438 
3439     Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
3440     result = CGF.Builder.CreateGEP(result, index, "add.ptr");
3441     return CGF.Builder.CreateBitCast(result, pointer->getType());
3442   }
3443 
3444   QualType elementType = pointerType->getPointeeType();
3445   if (const VariableArrayType *vla
3446         = CGF.getContext().getAsVariableArrayType(elementType)) {
3447     // The element count here is the total number of non-VLA elements.
3448     llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
3449 
3450     // Effectively, the multiply by the VLA size is part of the GEP.
3451     // GEP indexes are signed, and scaling an index isn't permitted to
3452     // signed-overflow, so we use the same semantics for our explicit
3453     // multiply.  We suppress this if overflow is not undefined behavior.
3454     if (CGF.getLangOpts().isSignedOverflowDefined()) {
3455       index = CGF.Builder.CreateMul(index, numElements, "vla.index");
3456       pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
3457     } else {
3458       index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
3459       pointer =
3460           CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3461                                      op.E->getExprLoc(), "add.ptr");
3462     }
3463     return pointer;
3464   }
3465 
3466   // Explicitly handle GNU void* and function pointer arithmetic extensions. The
3467   // GNU void* casts amount to no-ops since our void* type is i8*, but this is
3468   // future proof.
3469   if (elementType->isVoidType() || elementType->isFunctionType()) {
3470     Value *result = CGF.EmitCastToVoidPtr(pointer);
3471     result = CGF.Builder.CreateGEP(result, index, "add.ptr");
3472     return CGF.Builder.CreateBitCast(result, pointer->getType());
3473   }
3474 
3475   if (CGF.getLangOpts().isSignedOverflowDefined())
3476     return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
3477 
3478   return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3479                                     op.E->getExprLoc(), "add.ptr");
3480 }
3481 
3482 // Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
3483 // Addend. Use negMul and negAdd to negate the first operand of the Mul or
3484 // the add operand respectively. This allows fmuladd to represent a*b-c, or
3485 // c-a*b. Patterns in LLVM should catch the negated forms and translate them to
3486 // efficient operations.
3487 static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
3488                            const CodeGenFunction &CGF, CGBuilderTy &Builder,
3489                            bool negMul, bool negAdd) {
3490   assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
3491 
3492   Value *MulOp0 = MulOp->getOperand(0);
3493   Value *MulOp1 = MulOp->getOperand(1);
3494   if (negMul)
3495     MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
3496   if (negAdd)
3497     Addend = Builder.CreateFNeg(Addend, "neg");
3498 
3499   Value *FMulAdd = nullptr;
3500   if (Builder.getIsFPConstrained()) {
3501     assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
3502            "Only constrained operation should be created when Builder is in FP "
3503            "constrained mode");
3504     FMulAdd = Builder.CreateConstrainedFPCall(
3505         CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
3506                              Addend->getType()),
3507         {MulOp0, MulOp1, Addend});
3508   } else {
3509     FMulAdd = Builder.CreateCall(
3510         CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
3511         {MulOp0, MulOp1, Addend});
3512   }
3513   MulOp->eraseFromParent();
3514 
3515   return FMulAdd;
3516 }
3517 
3518 // Check whether it would be legal to emit an fmuladd intrinsic call to
3519 // represent op and if so, build the fmuladd.
3520 //
3521 // Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
3522 // Does NOT check the type of the operation - it's assumed that this function
3523 // will be called from contexts where it's known that the type is contractable.
3524 static Value* tryEmitFMulAdd(const BinOpInfo &op,
3525                          const CodeGenFunction &CGF, CGBuilderTy &Builder,
3526                          bool isSub=false) {
3527 
3528   assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
3529           op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
3530          "Only fadd/fsub can be the root of an fmuladd.");
3531 
3532   // Check whether this op is marked as fusable.
3533   if (!op.FPFeatures.allowFPContractWithinStatement())
3534     return nullptr;
3535 
3536   // We have a potentially fusable op. Look for a mul on one of the operands.
3537   // Also, make sure that the mul result isn't used directly. In that case,
3538   // there's no point creating a muladd operation.
3539   if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
3540     if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3541         LHSBinOp->use_empty())
3542       return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
3543   }
3544   if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
3545     if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3546         RHSBinOp->use_empty())
3547       return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
3548   }
3549 
3550   if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) {
3551     if (LHSBinOp->getIntrinsicID() ==
3552             llvm::Intrinsic::experimental_constrained_fmul &&
3553         LHSBinOp->use_empty())
3554       return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
3555   }
3556   if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) {
3557     if (RHSBinOp->getIntrinsicID() ==
3558             llvm::Intrinsic::experimental_constrained_fmul &&
3559         RHSBinOp->use_empty())
3560       return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
3561   }
3562 
3563   return nullptr;
3564 }
3565 
3566 Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
3567   if (op.LHS->getType()->isPointerTy() ||
3568       op.RHS->getType()->isPointerTy())
3569     return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);
3570 
3571   if (op.Ty->isSignedIntegerOrEnumerationType()) {
3572     switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3573     case LangOptions::SOB_Defined:
3574       return Builder.CreateAdd(op.LHS, op.RHS, "add");
3575     case LangOptions::SOB_Undefined:
3576       if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3577         return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3578       LLVM_FALLTHROUGH;
3579     case LangOptions::SOB_Trapping:
3580       if (CanElideOverflowCheck(CGF.getContext(), op))
3581         return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3582       return EmitOverflowCheckedBinOp(op);
3583     }
3584   }
3585 
3586   if (op.Ty->isConstantMatrixType()) {
3587     llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
3588     return MB.CreateAdd(op.LHS, op.RHS);
3589   }
3590 
3591   if (op.Ty->isUnsignedIntegerType() &&
3592       CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3593       !CanElideOverflowCheck(CGF.getContext(), op))
3594     return EmitOverflowCheckedBinOp(op);
3595 
3596   if (op.LHS->getType()->isFPOrFPVectorTy()) {
3597     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3598     // Try to form an fmuladd.
3599     if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
3600       return FMulAdd;
3601 
3602     return Builder.CreateFAdd(op.LHS, op.RHS, "add");
3603   }
3604 
3605   if (op.isFixedPointOp())
3606     return EmitFixedPointBinOp(op);
3607 
3608   return Builder.CreateAdd(op.LHS, op.RHS, "add");
3609 }
3610 
3611 /// The resulting value must be calculated with exact precision, so the operands
3612 /// may not be the same type.
3613 Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
3614   using llvm::APSInt;
3615   using llvm::ConstantInt;
3616 
3617   // This is either a binary operation where at least one of the operands is
3618   // a fixed-point type, or a unary operation where the operand is a fixed-point
3619   // type. The result type of a binary operation is determined by
3620   // Sema::handleFixedPointConversions().
3621   QualType ResultTy = op.Ty;
3622   QualType LHSTy, RHSTy;
3623   if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
3624     RHSTy = BinOp->getRHS()->getType();
3625     if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
3626       // For compound assignment, the effective type of the LHS at this point
3627       // is the computation LHS type, not the actual LHS type, and the final
3628       // result type is not the type of the expression but rather the
3629       // computation result type.
3630       LHSTy = CAO->getComputationLHSType();
3631       ResultTy = CAO->getComputationResultType();
3632     } else
3633       LHSTy = BinOp->getLHS()->getType();
3634   } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
3635     LHSTy = UnOp->getSubExpr()->getType();
3636     RHSTy = UnOp->getSubExpr()->getType();
3637   }
3638   ASTContext &Ctx = CGF.getContext();
3639   Value *LHS = op.LHS;
3640   Value *RHS = op.RHS;
3641 
3642   auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
3643   auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
3644   auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
3645   auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
3646 
3647   // Perform the actual operation.
3648   Value *Result;
3649   llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3650   switch (op.Opcode) {
3651   case BO_AddAssign:
3652   case BO_Add:
3653     Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
3654     break;
3655   case BO_SubAssign:
3656   case BO_Sub:
3657     Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
3658     break;
3659   case BO_MulAssign:
3660   case BO_Mul:
3661     Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
3662     break;
3663   case BO_DivAssign:
3664   case BO_Div:
3665     Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
3666     break;
3667   case BO_ShlAssign:
3668   case BO_Shl:
3669     Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
3670     break;
3671   case BO_ShrAssign:
3672   case BO_Shr:
3673     Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
3674     break;
3675   case BO_LT:
3676     return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
3677   case BO_GT:
3678     return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
3679   case BO_LE:
3680     return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
3681   case BO_GE:
3682     return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
3683   case BO_EQ:
3684     // For equality operations, we assume any padding bits on unsigned types are
3685     // zero'd out. They could be overwritten through non-saturating operations
3686     // that cause overflow, but this leads to undefined behavior.
3687     return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
3688   case BO_NE:
3689     return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
3690   case BO_Cmp:
3691   case BO_LAnd:
3692   case BO_LOr:
3693     llvm_unreachable("Found unimplemented fixed point binary operation");
3694   case BO_PtrMemD:
3695   case BO_PtrMemI:
3696   case BO_Rem:
3697   case BO_Xor:
3698   case BO_And:
3699   case BO_Or:
3700   case BO_Assign:
3701   case BO_RemAssign:
3702   case BO_AndAssign:
3703   case BO_XorAssign:
3704   case BO_OrAssign:
3705   case BO_Comma:
3706     llvm_unreachable("Found unsupported binary operation for fixed point types.");
3707   }
3708 
3709   bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
3710                  BinaryOperator::isShiftAssignOp(op.Opcode);
3711   // Convert to the result type.
3712   return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
3713                                                       : CommonFixedSema,
3714                                       ResultFixedSema);
3715 }
3716 
3717 Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
3718   // The LHS is always a pointer if either side is.
3719   if (!op.LHS->getType()->isPointerTy()) {
3720     if (op.Ty->isSignedIntegerOrEnumerationType()) {
3721       switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3722       case LangOptions::SOB_Defined:
3723         return Builder.CreateSub(op.LHS, op.RHS, "sub");
3724       case LangOptions::SOB_Undefined:
3725         if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3726           return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3727         LLVM_FALLTHROUGH;
3728       case LangOptions::SOB_Trapping:
3729         if (CanElideOverflowCheck(CGF.getContext(), op))
3730           return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3731         return EmitOverflowCheckedBinOp(op);
3732       }
3733     }
3734 
3735     if (op.Ty->isConstantMatrixType()) {
3736       llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
3737       return MB.CreateSub(op.LHS, op.RHS);
3738     }
3739 
3740     if (op.Ty->isUnsignedIntegerType() &&
3741         CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3742         !CanElideOverflowCheck(CGF.getContext(), op))
3743       return EmitOverflowCheckedBinOp(op);
3744 
3745     if (op.LHS->getType()->isFPOrFPVectorTy()) {
3746       CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3747       // Try to form an fmuladd.
3748       if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
3749         return FMulAdd;
3750       return Builder.CreateFSub(op.LHS, op.RHS, "sub");
3751     }
3752 
3753     if (op.isFixedPointOp())
3754       return EmitFixedPointBinOp(op);
3755 
3756     return Builder.CreateSub(op.LHS, op.RHS, "sub");
3757   }
3758 
3759   // If the RHS is not a pointer, then we have normal pointer
3760   // arithmetic.
3761   if (!op.RHS->getType()->isPointerTy())
3762     return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);
3763 
3764   // Otherwise, this is a pointer subtraction.
3765 
3766   // Do the raw subtraction part.
3767   llvm::Value *LHS
3768     = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
3769   llvm::Value *RHS
3770     = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
3771   Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
3772 
3773   // Okay, figure out the element size.
3774   const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3775   QualType elementType = expr->getLHS()->getType()->getPointeeType();
3776 
3777   llvm::Value *divisor = nullptr;
3778 
3779   // For a variable-length array, this is going to be non-constant.
3780   if (const VariableArrayType *vla
3781         = CGF.getContext().getAsVariableArrayType(elementType)) {
3782     auto VlaSize = CGF.getVLASize(vla);
3783     elementType = VlaSize.Type;
3784     divisor = VlaSize.NumElts;
3785 
3786     // Scale the number of non-VLA elements by the non-VLA element size.
3787     CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
3788     if (!eltSize.isOne())
3789       divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
3790 
3791   // For everything elese, we can just compute it, safe in the
3792   // assumption that Sema won't let anything through that we can't
3793   // safely compute the size of.
3794   } else {
3795     CharUnits elementSize;
3796     // Handle GCC extension for pointer arithmetic on void* and
3797     // function pointer types.
3798     if (elementType->isVoidType() || elementType->isFunctionType())
3799       elementSize = CharUnits::One();
3800     else
3801       elementSize = CGF.getContext().getTypeSizeInChars(elementType);
3802 
3803     // Don't even emit the divide for element size of 1.
3804     if (elementSize.isOne())
3805       return diffInChars;
3806 
3807     divisor = CGF.CGM.getSize(elementSize);
3808   }
3809 
3810   // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
3811   // pointer difference in C is only defined in the case where both operands
3812   // are pointing to elements of an array.
3813   return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
3814 }
3815 
3816 Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) {
3817   llvm::IntegerType *Ty;
3818   if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3819     Ty = cast<llvm::IntegerType>(VT->getElementType());
3820   else
3821     Ty = cast<llvm::IntegerType>(LHS->getType());
3822   return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
3823 }
3824 
3825 Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
3826                                               const Twine &Name) {
3827   llvm::IntegerType *Ty;
3828   if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3829     Ty = cast<llvm::IntegerType>(VT->getElementType());
3830   else
3831     Ty = cast<llvm::IntegerType>(LHS->getType());
3832 
3833   if (llvm::isPowerOf2_64(Ty->getBitWidth()))
3834         return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name);
3835 
3836   return Builder.CreateURem(
3837       RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
3838 }
3839 
3840 Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
3841   // TODO: This misses out on the sanitizer check below.
3842   if (Ops.isFixedPointOp())
3843     return EmitFixedPointBinOp(Ops);
3844 
3845   // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3846   // RHS to the same size as the LHS.
3847   Value *RHS = Ops.RHS;
3848   if (Ops.LHS->getType() != RHS->getType())
3849     RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3850 
3851   bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
3852                             Ops.Ty->hasSignedIntegerRepresentation() &&
3853                             !CGF.getLangOpts().isSignedOverflowDefined() &&
3854                             !CGF.getLangOpts().CPlusPlus20;
3855   bool SanitizeUnsignedBase =
3856       CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
3857       Ops.Ty->hasUnsignedIntegerRepresentation();
3858   bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
3859   bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
3860   // OpenCL 6.3j: shift values are effectively % word size of LHS.
3861   if (CGF.getLangOpts().OpenCL)
3862     RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
3863   else if ((SanitizeBase || SanitizeExponent) &&
3864            isa<llvm::IntegerType>(Ops.LHS->getType())) {
3865     CodeGenFunction::SanitizerScope SanScope(&CGF);
3866     SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
3867     llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
3868     llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
3869 
3870     if (SanitizeExponent) {
3871       Checks.push_back(
3872           std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
3873     }
3874 
3875     if (SanitizeBase) {
3876       // Check whether we are shifting any non-zero bits off the top of the
3877       // integer. We only emit this check if exponent is valid - otherwise
3878       // instructions below will have undefined behavior themselves.
3879       llvm::BasicBlock *Orig = Builder.GetInsertBlock();
3880       llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
3881       llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
3882       Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
3883       llvm::Value *PromotedWidthMinusOne =
3884           (RHS == Ops.RHS) ? WidthMinusOne
3885                            : GetWidthMinusOneValue(Ops.LHS, RHS);
3886       CGF.EmitBlock(CheckShiftBase);
3887       llvm::Value *BitsShiftedOff = Builder.CreateLShr(
3888           Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
3889                                      /*NUW*/ true, /*NSW*/ true),
3890           "shl.check");
3891       if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
3892         // In C99, we are not permitted to shift a 1 bit into the sign bit.
3893         // Under C++11's rules, shifting a 1 bit into the sign bit is
3894         // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
3895         // define signed left shifts, so we use the C99 and C++11 rules there).
3896         // Unsigned shifts can always shift into the top bit.
3897         llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
3898         BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
3899       }
3900       llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
3901       llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
3902       CGF.EmitBlock(Cont);
3903       llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
3904       BaseCheck->addIncoming(Builder.getTrue(), Orig);
3905       BaseCheck->addIncoming(ValidBase, CheckShiftBase);
3906       Checks.push_back(std::make_pair(
3907           BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase
3908                                         : SanitizerKind::UnsignedShiftBase));
3909     }
3910 
3911     assert(!Checks.empty());
3912     EmitBinOpCheck(Checks, Ops);
3913   }
3914 
3915   return Builder.CreateShl(Ops.LHS, RHS, "shl");
3916 }
3917 
3918 Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
3919   // TODO: This misses out on the sanitizer check below.
3920   if (Ops.isFixedPointOp())
3921     return EmitFixedPointBinOp(Ops);
3922 
3923   // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3924   // RHS to the same size as the LHS.
3925   Value *RHS = Ops.RHS;
3926   if (Ops.LHS->getType() != RHS->getType())
3927     RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3928 
3929   // OpenCL 6.3j: shift values are effectively % word size of LHS.
3930   if (CGF.getLangOpts().OpenCL)
3931     RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
3932   else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
3933            isa<llvm::IntegerType>(Ops.LHS->getType())) {
3934     CodeGenFunction::SanitizerScope SanScope(&CGF);
3935     llvm::Value *Valid =
3936         Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
3937     EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
3938   }
3939 
3940   if (Ops.Ty->hasUnsignedIntegerRepresentation())
3941     return Builder.CreateLShr(Ops.LHS, RHS, "shr");
3942   return Builder.CreateAShr(Ops.LHS, RHS, "shr");
3943 }
3944 
3945 enum IntrinsicType { VCMPEQ, VCMPGT };
3946 // return corresponding comparison intrinsic for given vector type
3947 static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
3948                                         BuiltinType::Kind ElemKind) {
3949   switch (ElemKind) {
3950   default: llvm_unreachable("unexpected element type");
3951   case BuiltinType::Char_U:
3952   case BuiltinType::UChar:
3953     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
3954                             llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
3955   case BuiltinType::Char_S:
3956   case BuiltinType::SChar:
3957     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
3958                             llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
3959   case BuiltinType::UShort:
3960     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
3961                             llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
3962   case BuiltinType::Short:
3963     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
3964                             llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
3965   case BuiltinType::UInt:
3966     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
3967                             llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
3968   case BuiltinType::Int:
3969     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
3970                             llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
3971   case BuiltinType::ULong:
3972   case BuiltinType::ULongLong:
3973     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
3974                             llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
3975   case BuiltinType::Long:
3976   case BuiltinType::LongLong:
3977     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
3978                             llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
3979   case BuiltinType::Float:
3980     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
3981                             llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
3982   case BuiltinType::Double:
3983     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
3984                             llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
3985   case BuiltinType::UInt128:
3986     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
3987                           : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
3988   case BuiltinType::Int128:
3989     return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
3990                           : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
3991   }
3992 }
3993 
3994 Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
3995                                       llvm::CmpInst::Predicate UICmpOpc,
3996                                       llvm::CmpInst::Predicate SICmpOpc,
3997                                       llvm::CmpInst::Predicate FCmpOpc,
3998                                       bool IsSignaling) {
3999   TestAndClearIgnoreResultAssign();
4000   Value *Result;
4001   QualType LHSTy = E->getLHS()->getType();
4002   QualType RHSTy = E->getRHS()->getType();
4003   if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
4004     assert(E->getOpcode() == BO_EQ ||
4005            E->getOpcode() == BO_NE);
4006     Value *LHS = CGF.EmitScalarExpr(E->getLHS());
4007     Value *RHS = CGF.EmitScalarExpr(E->getRHS());
4008     Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
4009                    CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
4010   } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
4011     BinOpInfo BOInfo = EmitBinOps(E);
4012     Value *LHS = BOInfo.LHS;
4013     Value *RHS = BOInfo.RHS;
4014 
4015     // If AltiVec, the comparison results in a numeric type, so we use
4016     // intrinsics comparing vectors and giving 0 or 1 as a result
4017     if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
4018       // constants for mapping CR6 register bits to predicate result
4019       enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
4020 
4021       llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
4022 
4023       // in several cases vector arguments order will be reversed
4024       Value *FirstVecArg = LHS,
4025             *SecondVecArg = RHS;
4026 
4027       QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
4028       BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
4029 
4030       switch(E->getOpcode()) {
4031       default: llvm_unreachable("is not a comparison operation");
4032       case BO_EQ:
4033         CR6 = CR6_LT;
4034         ID = GetIntrinsic(VCMPEQ, ElementKind);
4035         break;
4036       case BO_NE:
4037         CR6 = CR6_EQ;
4038         ID = GetIntrinsic(VCMPEQ, ElementKind);
4039         break;
4040       case BO_LT:
4041         CR6 = CR6_LT;
4042         ID = GetIntrinsic(VCMPGT, ElementKind);
4043         std::swap(FirstVecArg, SecondVecArg);
4044         break;
4045       case BO_GT:
4046         CR6 = CR6_LT;
4047         ID = GetIntrinsic(VCMPGT, ElementKind);
4048         break;
4049       case BO_LE:
4050         if (ElementKind == BuiltinType::Float) {
4051           CR6 = CR6_LT;
4052           ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4053           std::swap(FirstVecArg, SecondVecArg);
4054         }
4055         else {
4056           CR6 = CR6_EQ;
4057           ID = GetIntrinsic(VCMPGT, ElementKind);
4058         }
4059         break;
4060       case BO_GE:
4061         if (ElementKind == BuiltinType::Float) {
4062           CR6 = CR6_LT;
4063           ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4064         }
4065         else {
4066           CR6 = CR6_EQ;
4067           ID = GetIntrinsic(VCMPGT, ElementKind);
4068           std::swap(FirstVecArg, SecondVecArg);
4069         }
4070         break;
4071       }
4072 
4073       Value *CR6Param = Builder.getInt32(CR6);
4074       llvm::Function *F = CGF.CGM.getIntrinsic(ID);
4075       Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
4076 
4077       // The result type of intrinsic may not be same as E->getType().
4078       // If E->getType() is not BoolTy, EmitScalarConversion will do the
4079       // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
4080       // do nothing, if ResultTy is not i1 at the same time, it will cause
4081       // crash later.
4082       llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
4083       if (ResultTy->getBitWidth() > 1 &&
4084           E->getType() == CGF.getContext().BoolTy)
4085         Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
4086       return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4087                                   E->getExprLoc());
4088     }
4089 
4090     if (BOInfo.isFixedPointOp()) {
4091       Result = EmitFixedPointBinOp(BOInfo);
4092     } else if (LHS->getType()->isFPOrFPVectorTy()) {
4093       CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
4094       if (!IsSignaling)
4095         Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
4096       else
4097         Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
4098     } else if (LHSTy->hasSignedIntegerRepresentation()) {
4099       Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
4100     } else {
4101       // Unsigned integers and pointers.
4102 
4103       if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
4104           !isa<llvm::ConstantPointerNull>(LHS) &&
4105           !isa<llvm::ConstantPointerNull>(RHS)) {
4106 
4107         // Dynamic information is required to be stripped for comparisons,
4108         // because it could leak the dynamic information.  Based on comparisons
4109         // of pointers to dynamic objects, the optimizer can replace one pointer
4110         // with another, which might be incorrect in presence of invariant
4111         // groups. Comparison with null is safe because null does not carry any
4112         // dynamic information.
4113         if (LHSTy.mayBeDynamicClass())
4114           LHS = Builder.CreateStripInvariantGroup(LHS);
4115         if (RHSTy.mayBeDynamicClass())
4116           RHS = Builder.CreateStripInvariantGroup(RHS);
4117       }
4118 
4119       Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
4120     }
4121 
4122     // If this is a vector comparison, sign extend the result to the appropriate
4123     // vector integer type and return it (don't convert to bool).
4124     if (LHSTy->isVectorType())
4125       return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
4126 
4127   } else {
4128     // Complex Comparison: can only be an equality comparison.
4129     CodeGenFunction::ComplexPairTy LHS, RHS;
4130     QualType CETy;
4131     if (auto *CTy = LHSTy->getAs<ComplexType>()) {
4132       LHS = CGF.EmitComplexExpr(E->getLHS());
4133       CETy = CTy->getElementType();
4134     } else {
4135       LHS.first = Visit(E->getLHS());
4136       LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
4137       CETy = LHSTy;
4138     }
4139     if (auto *CTy = RHSTy->getAs<ComplexType>()) {
4140       RHS = CGF.EmitComplexExpr(E->getRHS());
4141       assert(CGF.getContext().hasSameUnqualifiedType(CETy,
4142                                                      CTy->getElementType()) &&
4143              "The element types must always match.");
4144       (void)CTy;
4145     } else {
4146       RHS.first = Visit(E->getRHS());
4147       RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
4148       assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
4149              "The element types must always match.");
4150     }
4151 
4152     Value *ResultR, *ResultI;
4153     if (CETy->isRealFloatingType()) {
4154       // As complex comparisons can only be equality comparisons, they
4155       // are never signaling comparisons.
4156       ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
4157       ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
4158     } else {
4159       // Complex comparisons can only be equality comparisons.  As such, signed
4160       // and unsigned opcodes are the same.
4161       ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
4162       ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
4163     }
4164 
4165     if (E->getOpcode() == BO_EQ) {
4166       Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
4167     } else {
4168       assert(E->getOpcode() == BO_NE &&
4169              "Complex comparison other than == or != ?");
4170       Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
4171     }
4172   }
4173 
4174   return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4175                               E->getExprLoc());
4176 }
4177 
4178 Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
4179   bool Ignore = TestAndClearIgnoreResultAssign();
4180 
4181   Value *RHS;
4182   LValue LHS;
4183 
4184   switch (E->getLHS()->getType().getObjCLifetime()) {
4185   case Qualifiers::OCL_Strong:
4186     std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
4187     break;
4188 
4189   case Qualifiers::OCL_Autoreleasing:
4190     std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
4191     break;
4192 
4193   case Qualifiers::OCL_ExplicitNone:
4194     std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
4195     break;
4196 
4197   case Qualifiers::OCL_Weak:
4198     RHS = Visit(E->getRHS());
4199     LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4200     RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
4201     break;
4202 
4203   case Qualifiers::OCL_None:
4204     // __block variables need to have the rhs evaluated first, plus
4205     // this should improve codegen just a little.
4206     RHS = Visit(E->getRHS());
4207     LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4208 
4209     // Store the value into the LHS.  Bit-fields are handled specially
4210     // because the result is altered by the store, i.e., [C99 6.5.16p1]
4211     // 'An assignment expression has the value of the left operand after
4212     // the assignment...'.
4213     if (LHS.isBitField()) {
4214       CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
4215     } else {
4216       CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
4217       CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
4218     }
4219   }
4220 
4221   // If the result is clearly ignored, return now.
4222   if (Ignore)
4223     return nullptr;
4224 
4225   // The result of an assignment in C is the assigned r-value.
4226   if (!CGF.getLangOpts().CPlusPlus)
4227     return RHS;
4228 
4229   // If the lvalue is non-volatile, return the computed value of the assignment.
4230   if (!LHS.isVolatileQualified())
4231     return RHS;
4232 
4233   // Otherwise, reload the value.
4234   return EmitLoadOfLValue(LHS, E->getExprLoc());
4235 }
4236 
4237 Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
4238   // Perform vector logical and on comparisons with zero vectors.
4239   if (E->getType()->isVectorType()) {
4240     CGF.incrementProfileCounter(E);
4241 
4242     Value *LHS = Visit(E->getLHS());
4243     Value *RHS = Visit(E->getRHS());
4244     Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4245     if (LHS->getType()->isFPOrFPVectorTy()) {
4246       CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4247           CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4248       LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4249       RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4250     } else {
4251       LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4252       RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4253     }
4254     Value *And = Builder.CreateAnd(LHS, RHS);
4255     return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
4256   }
4257 
4258   bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4259   llvm::Type *ResTy = ConvertType(E->getType());
4260 
4261   // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
4262   // If we have 1 && X, just emit X without inserting the control flow.
4263   bool LHSCondVal;
4264   if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4265     if (LHSCondVal) { // If we have 1 && X, just emit X.
4266       CGF.incrementProfileCounter(E);
4267 
4268       Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4269 
4270       // If we're generating for profiling or coverage, generate a branch to a
4271       // block that increments the RHS counter needed to track branch condition
4272       // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4273       // "FalseBlock" after the increment is done.
4274       if (InstrumentRegions &&
4275           CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4276         llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
4277         llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4278         Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
4279         CGF.EmitBlock(RHSBlockCnt);
4280         CGF.incrementProfileCounter(E->getRHS());
4281         CGF.EmitBranch(FBlock);
4282         CGF.EmitBlock(FBlock);
4283       }
4284 
4285       // ZExt result to int or bool.
4286       return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
4287     }
4288 
4289     // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
4290     if (!CGF.ContainsLabel(E->getRHS()))
4291       return llvm::Constant::getNullValue(ResTy);
4292   }
4293 
4294   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
4295   llvm::BasicBlock *RHSBlock  = CGF.createBasicBlock("land.rhs");
4296 
4297   CodeGenFunction::ConditionalEvaluation eval(CGF);
4298 
4299   // Branch on the LHS first.  If it is false, go to the failure (cont) block.
4300   CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
4301                            CGF.getProfileCount(E->getRHS()));
4302 
4303   // Any edges into the ContBlock are now from an (indeterminate number of)
4304   // edges from this first condition.  All of these values will be false.  Start
4305   // setting up the PHI node in the Cont Block for this.
4306   llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4307                                             "", ContBlock);
4308   for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4309        PI != PE; ++PI)
4310     PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
4311 
4312   eval.begin(CGF);
4313   CGF.EmitBlock(RHSBlock);
4314   CGF.incrementProfileCounter(E);
4315   Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4316   eval.end(CGF);
4317 
4318   // Reaquire the RHS block, as there may be subblocks inserted.
4319   RHSBlock = Builder.GetInsertBlock();
4320 
4321   // If we're generating for profiling or coverage, generate a branch on the
4322   // RHS to a block that increments the RHS true counter needed to track branch
4323   // condition coverage.
4324   if (InstrumentRegions &&
4325       CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4326     llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4327     Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
4328     CGF.EmitBlock(RHSBlockCnt);
4329     CGF.incrementProfileCounter(E->getRHS());
4330     CGF.EmitBranch(ContBlock);
4331     PN->addIncoming(RHSCond, RHSBlockCnt);
4332   }
4333 
4334   // Emit an unconditional branch from this block to ContBlock.
4335   {
4336     // There is no need to emit line number for unconditional branch.
4337     auto NL = ApplyDebugLocation::CreateEmpty(CGF);
4338     CGF.EmitBlock(ContBlock);
4339   }
4340   // Insert an entry into the phi node for the edge with the value of RHSCond.
4341   PN->addIncoming(RHSCond, RHSBlock);
4342 
4343   // Artificial location to preserve the scope information
4344   {
4345     auto NL = ApplyDebugLocation::CreateArtificial(CGF);
4346     PN->setDebugLoc(Builder.getCurrentDebugLocation());
4347   }
4348 
4349   // ZExt result to int.
4350   return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
4351 }
4352 
4353 Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
4354   // Perform vector logical or on comparisons with zero vectors.
4355   if (E->getType()->isVectorType()) {
4356     CGF.incrementProfileCounter(E);
4357 
4358     Value *LHS = Visit(E->getLHS());
4359     Value *RHS = Visit(E->getRHS());
4360     Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4361     if (LHS->getType()->isFPOrFPVectorTy()) {
4362       CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4363           CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4364       LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4365       RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4366     } else {
4367       LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4368       RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4369     }
4370     Value *Or = Builder.CreateOr(LHS, RHS);
4371     return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
4372   }
4373 
4374   bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4375   llvm::Type *ResTy = ConvertType(E->getType());
4376 
4377   // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
4378   // If we have 0 || X, just emit X without inserting the control flow.
4379   bool LHSCondVal;
4380   if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4381     if (!LHSCondVal) { // If we have 0 || X, just emit X.
4382       CGF.incrementProfileCounter(E);
4383 
4384       Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4385 
4386       // If we're generating for profiling or coverage, generate a branch to a
4387       // block that increments the RHS counter need to track branch condition
4388       // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4389       // "FalseBlock" after the increment is done.
4390       if (InstrumentRegions &&
4391           CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4392         llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
4393         llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4394         Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
4395         CGF.EmitBlock(RHSBlockCnt);
4396         CGF.incrementProfileCounter(E->getRHS());
4397         CGF.EmitBranch(FBlock);
4398         CGF.EmitBlock(FBlock);
4399       }
4400 
4401       // ZExt result to int or bool.
4402       return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
4403     }
4404 
4405     // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
4406     if (!CGF.ContainsLabel(E->getRHS()))
4407       return llvm::ConstantInt::get(ResTy, 1);
4408   }
4409 
4410   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
4411   llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
4412 
4413   CodeGenFunction::ConditionalEvaluation eval(CGF);
4414 
4415   // Branch on the LHS first.  If it is true, go to the success (cont) block.
4416   CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
4417                            CGF.getCurrentProfileCount() -
4418                                CGF.getProfileCount(E->getRHS()));
4419 
4420   // Any edges into the ContBlock are now from an (indeterminate number of)
4421   // edges from this first condition.  All of these values will be true.  Start
4422   // setting up the PHI node in the Cont Block for this.
4423   llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4424                                             "", ContBlock);
4425   for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4426        PI != PE; ++PI)
4427     PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
4428 
4429   eval.begin(CGF);
4430 
4431   // Emit the RHS condition as a bool value.
4432   CGF.EmitBlock(RHSBlock);
4433   CGF.incrementProfileCounter(E);
4434   Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4435 
4436   eval.end(CGF);
4437 
4438   // Reaquire the RHS block, as there may be subblocks inserted.
4439   RHSBlock = Builder.GetInsertBlock();
4440 
4441   // If we're generating for profiling or coverage, generate a branch on the
4442   // RHS to a block that increments the RHS true counter needed to track branch
4443   // condition coverage.
4444   if (InstrumentRegions &&
4445       CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4446     llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4447     Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
4448     CGF.EmitBlock(RHSBlockCnt);
4449     CGF.incrementProfileCounter(E->getRHS());
4450     CGF.EmitBranch(ContBlock);
4451     PN->addIncoming(RHSCond, RHSBlockCnt);
4452   }
4453 
4454   // Emit an unconditional branch from this block to ContBlock.  Insert an entry
4455   // into the phi node for the edge with the value of RHSCond.
4456   CGF.EmitBlock(ContBlock);
4457   PN->addIncoming(RHSCond, RHSBlock);
4458 
4459   // ZExt result to int.
4460   return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
4461 }
4462 
4463 Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
4464   CGF.EmitIgnoredExpr(E->getLHS());
4465   CGF.EnsureInsertPoint();
4466   return Visit(E->getRHS());
4467 }
4468 
4469 //===----------------------------------------------------------------------===//
4470 //                             Other Operators
4471 //===----------------------------------------------------------------------===//
4472 
4473 /// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
4474 /// expression is cheap enough and side-effect-free enough to evaluate
4475 /// unconditionally instead of conditionally.  This is used to convert control
4476 /// flow into selects in some cases.
4477 static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
4478                                                    CodeGenFunction &CGF) {
4479   // Anything that is an integer or floating point constant is fine.
4480   return E->IgnoreParens()->isEvaluatable(CGF.getContext());
4481 
4482   // Even non-volatile automatic variables can't be evaluated unconditionally.
4483   // Referencing a thread_local may cause non-trivial initialization work to
4484   // occur. If we're inside a lambda and one of the variables is from the scope
4485   // outside the lambda, that function may have returned already. Reading its
4486   // locals is a bad idea. Also, these reads may introduce races there didn't
4487   // exist in the source-level program.
4488 }
4489 
4490 
4491 Value *ScalarExprEmitter::
4492 VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
4493   TestAndClearIgnoreResultAssign();
4494 
4495   // Bind the common expression if necessary.
4496   CodeGenFunction::OpaqueValueMapping binding(CGF, E);
4497 
4498   Expr *condExpr = E->getCond();
4499   Expr *lhsExpr = E->getTrueExpr();
4500   Expr *rhsExpr = E->getFalseExpr();
4501 
4502   // If the condition constant folds and can be elided, try to avoid emitting
4503   // the condition and the dead arm.
4504   bool CondExprBool;
4505   if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4506     Expr *live = lhsExpr, *dead = rhsExpr;
4507     if (!CondExprBool) std::swap(live, dead);
4508 
4509     // If the dead side doesn't have labels we need, just emit the Live part.
4510     if (!CGF.ContainsLabel(dead)) {
4511       if (CondExprBool)
4512         CGF.incrementProfileCounter(E);
4513       Value *Result = Visit(live);
4514 
4515       // If the live part is a throw expression, it acts like it has a void
4516       // type, so evaluating it returns a null Value*.  However, a conditional
4517       // with non-void type must return a non-null Value*.
4518       if (!Result && !E->getType()->isVoidType())
4519         Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
4520 
4521       return Result;
4522     }
4523   }
4524 
4525   // OpenCL: If the condition is a vector, we can treat this condition like
4526   // the select function.
4527   if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
4528       condExpr->getType()->isExtVectorType()) {
4529     CGF.incrementProfileCounter(E);
4530 
4531     llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4532     llvm::Value *LHS = Visit(lhsExpr);
4533     llvm::Value *RHS = Visit(rhsExpr);
4534 
4535     llvm::Type *condType = ConvertType(condExpr->getType());
4536     auto *vecTy = cast<llvm::FixedVectorType>(condType);
4537 
4538     unsigned numElem = vecTy->getNumElements();
4539     llvm::Type *elemType = vecTy->getElementType();
4540 
4541     llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
4542     llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
4543     llvm::Value *tmp = Builder.CreateSExt(
4544         TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
4545     llvm::Value *tmp2 = Builder.CreateNot(tmp);
4546 
4547     // Cast float to int to perform ANDs if necessary.
4548     llvm::Value *RHSTmp = RHS;
4549     llvm::Value *LHSTmp = LHS;
4550     bool wasCast = false;
4551     llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
4552     if (rhsVTy->getElementType()->isFloatingPointTy()) {
4553       RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
4554       LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
4555       wasCast = true;
4556     }
4557 
4558     llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
4559     llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
4560     llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
4561     if (wasCast)
4562       tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
4563 
4564     return tmp5;
4565   }
4566 
4567   if (condExpr->getType()->isVectorType()) {
4568     CGF.incrementProfileCounter(E);
4569 
4570     llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4571     llvm::Value *LHS = Visit(lhsExpr);
4572     llvm::Value *RHS = Visit(rhsExpr);
4573 
4574     llvm::Type *CondType = ConvertType(condExpr->getType());
4575     auto *VecTy = cast<llvm::VectorType>(CondType);
4576     llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
4577 
4578     CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
4579     return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
4580   }
4581 
4582   // If this is a really simple expression (like x ? 4 : 5), emit this as a
4583   // select instead of as control flow.  We can only do this if it is cheap and
4584   // safe to evaluate the LHS and RHS unconditionally.
4585   if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
4586       isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
4587     llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
4588     llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
4589 
4590     CGF.incrementProfileCounter(E, StepV);
4591 
4592     llvm::Value *LHS = Visit(lhsExpr);
4593     llvm::Value *RHS = Visit(rhsExpr);
4594     if (!LHS) {
4595       // If the conditional has void type, make sure we return a null Value*.
4596       assert(!RHS && "LHS and RHS types must match");
4597       return nullptr;
4598     }
4599     return Builder.CreateSelect(CondV, LHS, RHS, "cond");
4600   }
4601 
4602   llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
4603   llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
4604   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
4605 
4606   CodeGenFunction::ConditionalEvaluation eval(CGF);
4607   CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
4608                            CGF.getProfileCount(lhsExpr));
4609 
4610   CGF.EmitBlock(LHSBlock);
4611   CGF.incrementProfileCounter(E);
4612   eval.begin(CGF);
4613   Value *LHS = Visit(lhsExpr);
4614   eval.end(CGF);
4615 
4616   LHSBlock = Builder.GetInsertBlock();
4617   Builder.CreateBr(ContBlock);
4618 
4619   CGF.EmitBlock(RHSBlock);
4620   eval.begin(CGF);
4621   Value *RHS = Visit(rhsExpr);
4622   eval.end(CGF);
4623 
4624   RHSBlock = Builder.GetInsertBlock();
4625   CGF.EmitBlock(ContBlock);
4626 
4627   // If the LHS or RHS is a throw expression, it will be legitimately null.
4628   if (!LHS)
4629     return RHS;
4630   if (!RHS)
4631     return LHS;
4632 
4633   // Create a PHI node for the real part.
4634   llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
4635   PN->addIncoming(LHS, LHSBlock);
4636   PN->addIncoming(RHS, RHSBlock);
4637   return PN;
4638 }
4639 
4640 Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
4641   return Visit(E->getChosenSubExpr());
4642 }
4643 
4644 Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
4645   QualType Ty = VE->getType();
4646 
4647   if (Ty->isVariablyModifiedType())
4648     CGF.EmitVariablyModifiedType(Ty);
4649 
4650   Address ArgValue = Address::invalid();
4651   Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
4652 
4653   llvm::Type *ArgTy = ConvertType(VE->getType());
4654 
4655   // If EmitVAArg fails, emit an error.
4656   if (!ArgPtr.isValid()) {
4657     CGF.ErrorUnsupported(VE, "va_arg expression");
4658     return llvm::UndefValue::get(ArgTy);
4659   }
4660 
4661   // FIXME Volatility.
4662   llvm::Value *Val = Builder.CreateLoad(ArgPtr);
4663 
4664   // If EmitVAArg promoted the type, we must truncate it.
4665   if (ArgTy != Val->getType()) {
4666     if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
4667       Val = Builder.CreateIntToPtr(Val, ArgTy);
4668     else
4669       Val = Builder.CreateTrunc(Val, ArgTy);
4670   }
4671 
4672   return Val;
4673 }
4674 
4675 Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
4676   return CGF.EmitBlockLiteral(block);
4677 }
4678 
4679 // Convert a vec3 to vec4, or vice versa.
4680 static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
4681                                  Value *Src, unsigned NumElementsDst) {
4682   static constexpr int Mask[] = {0, 1, 2, -1};
4683   return Builder.CreateShuffleVector(Src,
4684                                      llvm::makeArrayRef(Mask, NumElementsDst));
4685 }
4686 
4687 // Create cast instructions for converting LLVM value \p Src to LLVM type \p
4688 // DstTy. \p Src has the same size as \p DstTy. Both are single value types
4689 // but could be scalar or vectors of different lengths, and either can be
4690 // pointer.
4691 // There are 4 cases:
4692 // 1. non-pointer -> non-pointer  : needs 1 bitcast
4693 // 2. pointer -> pointer          : needs 1 bitcast or addrspacecast
4694 // 3. pointer -> non-pointer
4695 //   a) pointer -> intptr_t       : needs 1 ptrtoint
4696 //   b) pointer -> non-intptr_t   : needs 1 ptrtoint then 1 bitcast
4697 // 4. non-pointer -> pointer
4698 //   a) intptr_t -> pointer       : needs 1 inttoptr
4699 //   b) non-intptr_t -> pointer   : needs 1 bitcast then 1 inttoptr
4700 // Note: for cases 3b and 4b two casts are required since LLVM casts do not
4701 // allow casting directly between pointer types and non-integer non-pointer
4702 // types.
4703 static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
4704                                            const llvm::DataLayout &DL,
4705                                            Value *Src, llvm::Type *DstTy,
4706                                            StringRef Name = "") {
4707   auto SrcTy = Src->getType();
4708 
4709   // Case 1.
4710   if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
4711     return Builder.CreateBitCast(Src, DstTy, Name);
4712 
4713   // Case 2.
4714   if (SrcTy->isPointerTy() && DstTy->isPointerTy())
4715     return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
4716 
4717   // Case 3.
4718   if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
4719     // Case 3b.
4720     if (!DstTy->isIntegerTy())
4721       Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
4722     // Cases 3a and 3b.
4723     return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
4724   }
4725 
4726   // Case 4b.
4727   if (!SrcTy->isIntegerTy())
4728     Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
4729   // Cases 4a and 4b.
4730   return Builder.CreateIntToPtr(Src, DstTy, Name);
4731 }
4732 
4733 Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
4734   Value *Src  = CGF.EmitScalarExpr(E->getSrcExpr());
4735   llvm::Type *DstTy = ConvertType(E->getType());
4736 
4737   llvm::Type *SrcTy = Src->getType();
4738   unsigned NumElementsSrc =
4739       isa<llvm::VectorType>(SrcTy)
4740           ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
4741           : 0;
4742   unsigned NumElementsDst =
4743       isa<llvm::VectorType>(DstTy)
4744           ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
4745           : 0;
4746 
4747   // Going from vec3 to non-vec3 is a special case and requires a shuffle
4748   // vector to get a vec4, then a bitcast if the target type is different.
4749   if (NumElementsSrc == 3 && NumElementsDst != 3) {
4750     Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
4751 
4752     if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4753       Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4754                                          DstTy);
4755     }
4756 
4757     Src->setName("astype");
4758     return Src;
4759   }
4760 
4761   // Going from non-vec3 to vec3 is a special case and requires a bitcast
4762   // to vec4 if the original type is not vec4, then a shuffle vector to
4763   // get a vec3.
4764   if (NumElementsSrc != 3 && NumElementsDst == 3) {
4765     if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4766       auto *Vec4Ty = llvm::FixedVectorType::get(
4767           cast<llvm::VectorType>(DstTy)->getElementType(), 4);
4768       Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4769                                          Vec4Ty);
4770     }
4771 
4772     Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
4773     Src->setName("astype");
4774     return Src;
4775   }
4776 
4777   return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
4778                                       Src, DstTy, "astype");
4779 }
4780 
4781 Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
4782   return CGF.EmitAtomicExpr(E).getScalarVal();
4783 }
4784 
4785 //===----------------------------------------------------------------------===//
4786 //                         Entry Point into this File
4787 //===----------------------------------------------------------------------===//
4788 
4789 /// Emit the computation of the specified expression of scalar type, ignoring
4790 /// the result.
4791 Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
4792   assert(E && hasScalarEvaluationKind(E->getType()) &&
4793          "Invalid scalar expression to emit");
4794 
4795   return ScalarExprEmitter(*this, IgnoreResultAssign)
4796       .Visit(const_cast<Expr *>(E));
4797 }
4798 
4799 /// Emit a conversion from the specified type to the specified destination type,
4800 /// both of which are LLVM scalar types.
4801 Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
4802                                              QualType DstTy,
4803                                              SourceLocation Loc) {
4804   assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
4805          "Invalid scalar expression to emit");
4806   return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
4807 }
4808 
4809 /// Emit a conversion from the specified complex type to the specified
4810 /// destination type, where the destination type is an LLVM scalar type.
4811 Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
4812                                                       QualType SrcTy,
4813                                                       QualType DstTy,
4814                                                       SourceLocation Loc) {
4815   assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
4816          "Invalid complex -> scalar conversion");
4817   return ScalarExprEmitter(*this)
4818       .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
4819 }
4820 
4821 
4822 llvm::Value *CodeGenFunction::
4823 EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
4824                         bool isInc, bool isPre) {
4825   return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
4826 }
4827 
4828 LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
4829   // object->isa or (*object).isa
4830   // Generate code as for: *(Class*)object
4831 
4832   Expr *BaseExpr = E->getBase();
4833   Address Addr = Address::invalid();
4834   if (BaseExpr->isRValue()) {
4835     Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
4836   } else {
4837     Addr = EmitLValue(BaseExpr).getAddress(*this);
4838   }
4839 
4840   // Cast the address to Class*.
4841   Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
4842   return MakeAddrLValue(Addr, E->getType());
4843 }
4844 
4845 
4846 LValue CodeGenFunction::EmitCompoundAssignmentLValue(
4847                                             const CompoundAssignOperator *E) {
4848   ScalarExprEmitter Scalar(*this);
4849   Value *Result = nullptr;
4850   switch (E->getOpcode()) {
4851 #define COMPOUND_OP(Op)                                                       \
4852     case BO_##Op##Assign:                                                     \
4853       return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
4854                                              Result)
4855   COMPOUND_OP(Mul);
4856   COMPOUND_OP(Div);
4857   COMPOUND_OP(Rem);
4858   COMPOUND_OP(Add);
4859   COMPOUND_OP(Sub);
4860   COMPOUND_OP(Shl);
4861   COMPOUND_OP(Shr);
4862   COMPOUND_OP(And);
4863   COMPOUND_OP(Xor);
4864   COMPOUND_OP(Or);
4865 #undef COMPOUND_OP
4866 
4867   case BO_PtrMemD:
4868   case BO_PtrMemI:
4869   case BO_Mul:
4870   case BO_Div:
4871   case BO_Rem:
4872   case BO_Add:
4873   case BO_Sub:
4874   case BO_Shl:
4875   case BO_Shr:
4876   case BO_LT:
4877   case BO_GT:
4878   case BO_LE:
4879   case BO_GE:
4880   case BO_EQ:
4881   case BO_NE:
4882   case BO_Cmp:
4883   case BO_And:
4884   case BO_Xor:
4885   case BO_Or:
4886   case BO_LAnd:
4887   case BO_LOr:
4888   case BO_Assign:
4889   case BO_Comma:
4890     llvm_unreachable("Not valid compound assignment operators");
4891   }
4892 
4893   llvm_unreachable("Unhandled compound assignment operator");
4894 }
4895 
4896 struct GEPOffsetAndOverflow {
4897   // The total (signed) byte offset for the GEP.
4898   llvm::Value *TotalOffset;
4899   // The offset overflow flag - true if the total offset overflows.
4900   llvm::Value *OffsetOverflows;
4901 };
4902 
4903 /// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
4904 /// and compute the total offset it applies from it's base pointer BasePtr.
4905 /// Returns offset in bytes and a boolean flag whether an overflow happened
4906 /// during evaluation.
4907 static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
4908                                                  llvm::LLVMContext &VMContext,
4909                                                  CodeGenModule &CGM,
4910                                                  CGBuilderTy &Builder) {
4911   const auto &DL = CGM.getDataLayout();
4912 
4913   // The total (signed) byte offset for the GEP.
4914   llvm::Value *TotalOffset = nullptr;
4915 
4916   // Was the GEP already reduced to a constant?
4917   if (isa<llvm::Constant>(GEPVal)) {
4918     // Compute the offset by casting both pointers to integers and subtracting:
4919     // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
4920     Value *BasePtr_int =
4921         Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
4922     Value *GEPVal_int =
4923         Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
4924     TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
4925     return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
4926   }
4927 
4928   auto *GEP = cast<llvm::GEPOperator>(GEPVal);
4929   assert(GEP->getPointerOperand() == BasePtr &&
4930          "BasePtr must be the the base of the GEP.");
4931   assert(GEP->isInBounds() && "Expected inbounds GEP");
4932 
4933   auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
4934 
4935   // Grab references to the signed add/mul overflow intrinsics for intptr_t.
4936   auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
4937   auto *SAddIntrinsic =
4938       CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
4939   auto *SMulIntrinsic =
4940       CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
4941 
4942   // The offset overflow flag - true if the total offset overflows.
4943   llvm::Value *OffsetOverflows = Builder.getFalse();
4944 
4945   /// Return the result of the given binary operation.
4946   auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
4947                   llvm::Value *RHS) -> llvm::Value * {
4948     assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
4949 
4950     // If the operands are constants, return a constant result.
4951     if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
4952       if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
4953         llvm::APInt N;
4954         bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
4955                                                   /*Signed=*/true, N);
4956         if (HasOverflow)
4957           OffsetOverflows = Builder.getTrue();
4958         return llvm::ConstantInt::get(VMContext, N);
4959       }
4960     }
4961 
4962     // Otherwise, compute the result with checked arithmetic.
4963     auto *ResultAndOverflow = Builder.CreateCall(
4964         (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
4965     OffsetOverflows = Builder.CreateOr(
4966         Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
4967     return Builder.CreateExtractValue(ResultAndOverflow, 0);
4968   };
4969 
4970   // Determine the total byte offset by looking at each GEP operand.
4971   for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
4972        GTI != GTE; ++GTI) {
4973     llvm::Value *LocalOffset;
4974     auto *Index = GTI.getOperand();
4975     // Compute the local offset contributed by this indexing step:
4976     if (auto *STy = GTI.getStructTypeOrNull()) {
4977       // For struct indexing, the local offset is the byte position of the
4978       // specified field.
4979       unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
4980       LocalOffset = llvm::ConstantInt::get(
4981           IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
4982     } else {
4983       // Otherwise this is array-like indexing. The local offset is the index
4984       // multiplied by the element size.
4985       auto *ElementSize = llvm::ConstantInt::get(
4986           IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
4987       auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
4988       LocalOffset = eval(BO_Mul, ElementSize, IndexS);
4989     }
4990 
4991     // If this is the first offset, set it as the total offset. Otherwise, add
4992     // the local offset into the running total.
4993     if (!TotalOffset || TotalOffset == Zero)
4994       TotalOffset = LocalOffset;
4995     else
4996       TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
4997   }
4998 
4999   return {TotalOffset, OffsetOverflows};
5000 }
5001 
5002 Value *
5003 CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
5004                                         bool SignedIndices, bool IsSubtraction,
5005                                         SourceLocation Loc, const Twine &Name) {
5006   Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);
5007 
5008   // If the pointer overflow sanitizer isn't enabled, do nothing.
5009   if (!SanOpts.has(SanitizerKind::PointerOverflow))
5010     return GEPVal;
5011 
5012   llvm::Type *PtrTy = Ptr->getType();
5013 
5014   // Perform nullptr-and-offset check unless the nullptr is defined.
5015   bool PerformNullCheck = !NullPointerIsDefined(
5016       Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
5017   // Check for overflows unless the GEP got constant-folded,
5018   // and only in the default address space
5019   bool PerformOverflowCheck =
5020       !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
5021 
5022   if (!(PerformNullCheck || PerformOverflowCheck))
5023     return GEPVal;
5024 
5025   const auto &DL = CGM.getDataLayout();
5026 
5027   SanitizerScope SanScope(this);
5028   llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
5029 
5030   GEPOffsetAndOverflow EvaluatedGEP =
5031       EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
5032 
5033   assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
5034           EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
5035          "If the offset got constant-folded, we don't expect that there was an "
5036          "overflow.");
5037 
5038   auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
5039 
5040   // Common case: if the total offset is zero, and we are using C++ semantics,
5041   // where nullptr+0 is defined, don't emit a check.
5042   if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
5043     return GEPVal;
5044 
5045   // Now that we've computed the total offset, add it to the base pointer (with
5046   // wrapping semantics).
5047   auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
5048   auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
5049 
5050   llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
5051 
5052   if (PerformNullCheck) {
5053     // In C++, if the base pointer evaluates to a null pointer value,
5054     // the only valid  pointer this inbounds GEP can produce is also
5055     // a null pointer, so the offset must also evaluate to zero.
5056     // Likewise, if we have non-zero base pointer, we can not get null pointer
5057     // as a result, so the offset can not be -intptr_t(BasePtr).
5058     // In other words, both pointers are either null, or both are non-null,
5059     // or the behaviour is undefined.
5060     //
5061     // C, however, is more strict in this regard, and gives more
5062     // optimization opportunities: in C, additionally, nullptr+0 is undefined.
5063     // So both the input to the 'gep inbounds' AND the output must not be null.
5064     auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
5065     auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
5066     auto *Valid =
5067         CGM.getLangOpts().CPlusPlus
5068             ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
5069             : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
5070     Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
5071   }
5072 
5073   if (PerformOverflowCheck) {
5074     // The GEP is valid if:
5075     // 1) The total offset doesn't overflow, and
5076     // 2) The sign of the difference between the computed address and the base
5077     // pointer matches the sign of the total offset.
5078     llvm::Value *ValidGEP;
5079     auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
5080     if (SignedIndices) {
5081       // GEP is computed as `unsigned base + signed offset`, therefore:
5082       // * If offset was positive, then the computed pointer can not be
5083       //   [unsigned] less than the base pointer, unless it overflowed.
5084       // * If offset was negative, then the computed pointer can not be
5085       //   [unsigned] greater than the bas pointere, unless it overflowed.
5086       auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5087       auto *PosOrZeroOffset =
5088           Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
5089       llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
5090       ValidGEP =
5091           Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
5092     } else if (!IsSubtraction) {
5093       // GEP is computed as `unsigned base + unsigned offset`,  therefore the
5094       // computed pointer can not be [unsigned] less than base pointer,
5095       // unless there was an overflow.
5096       // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
5097       ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5098     } else {
5099       // GEP is computed as `unsigned base - unsigned offset`, therefore the
5100       // computed pointer can not be [unsigned] greater than base pointer,
5101       // unless there was an overflow.
5102       // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
5103       ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
5104     }
5105     ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
5106     Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
5107   }
5108 
5109   assert(!Checks.empty() && "Should have produced some checks.");
5110 
5111   llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
5112   // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
5113   llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
5114   EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
5115 
5116   return GEPVal;
5117 }
5118