1 //===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // InstructionCombining - Combine instructions to form fewer, simple
10 // instructions.  This pass does not modify the CFG.  This pass is where
11 // algebraic simplification happens.
12 //
13 // This pass combines things like:
14 //    %Y = add i32 %X, 1
15 //    %Z = add i32 %Y, 1
16 // into:
17 //    %Z = add i32 %X, 2
18 //
19 // This is a simple worklist driven algorithm.
20 //
21 // This pass guarantees that the following canonicalizations are performed on
22 // the program:
23 //    1. If a binary operator has a constant operand, it is moved to the RHS
24 //    2. Bitwise operators with constant operands are always grouped so that
25 //       shifts are performed first, then or's, then and's, then xor's.
26 //    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
27 //    4. All cmp instructions on boolean values are replaced with logical ops
28 //    5. add X, X is represented as (X*2) => (X << 1)
29 //    6. Multiplies with a power-of-two constant argument are transformed into
30 //       shifts.
31 //   ... etc.
32 //
33 //===----------------------------------------------------------------------===//
34 
35 #include "InstCombineInternal.h"
36 #include "llvm-c/Initialization.h"
37 #include "llvm-c/Transforms/InstCombine.h"
38 #include "llvm/ADT/APInt.h"
39 #include "llvm/ADT/ArrayRef.h"
40 #include "llvm/ADT/DenseMap.h"
41 #include "llvm/ADT/None.h"
42 #include "llvm/ADT/SmallPtrSet.h"
43 #include "llvm/ADT/SmallVector.h"
44 #include "llvm/ADT/Statistic.h"
45 #include "llvm/ADT/TinyPtrVector.h"
46 #include "llvm/Analysis/AliasAnalysis.h"
47 #include "llvm/Analysis/AssumptionCache.h"
48 #include "llvm/Analysis/BasicAliasAnalysis.h"
49 #include "llvm/Analysis/BlockFrequencyInfo.h"
50 #include "llvm/Analysis/CFG.h"
51 #include "llvm/Analysis/ConstantFolding.h"
52 #include "llvm/Analysis/EHPersonalities.h"
53 #include "llvm/Analysis/GlobalsModRef.h"
54 #include "llvm/Analysis/InstructionSimplify.h"
55 #include "llvm/Analysis/LazyBlockFrequencyInfo.h"
56 #include "llvm/Analysis/LoopInfo.h"
57 #include "llvm/Analysis/MemoryBuiltins.h"
58 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
59 #include "llvm/Analysis/ProfileSummaryInfo.h"
60 #include "llvm/Analysis/TargetFolder.h"
61 #include "llvm/Analysis/TargetLibraryInfo.h"
62 #include "llvm/Analysis/TargetTransformInfo.h"
63 #include "llvm/Analysis/ValueTracking.h"
64 #include "llvm/Analysis/VectorUtils.h"
65 #include "llvm/IR/BasicBlock.h"
66 #include "llvm/IR/CFG.h"
67 #include "llvm/IR/Constant.h"
68 #include "llvm/IR/Constants.h"
69 #include "llvm/IR/DIBuilder.h"
70 #include "llvm/IR/DataLayout.h"
71 #include "llvm/IR/DebugInfo.h"
72 #include "llvm/IR/DerivedTypes.h"
73 #include "llvm/IR/Dominators.h"
74 #include "llvm/IR/Function.h"
75 #include "llvm/IR/GetElementPtrTypeIterator.h"
76 #include "llvm/IR/IRBuilder.h"
77 #include "llvm/IR/InstrTypes.h"
78 #include "llvm/IR/Instruction.h"
79 #include "llvm/IR/Instructions.h"
80 #include "llvm/IR/IntrinsicInst.h"
81 #include "llvm/IR/Intrinsics.h"
82 #include "llvm/IR/LegacyPassManager.h"
83 #include "llvm/IR/Metadata.h"
84 #include "llvm/IR/Operator.h"
85 #include "llvm/IR/PassManager.h"
86 #include "llvm/IR/PatternMatch.h"
87 #include "llvm/IR/Type.h"
88 #include "llvm/IR/Use.h"
89 #include "llvm/IR/User.h"
90 #include "llvm/IR/Value.h"
91 #include "llvm/IR/ValueHandle.h"
92 #include "llvm/InitializePasses.h"
93 #include "llvm/Pass.h"
94 #include "llvm/Support/CBindingWrapping.h"
95 #include "llvm/Support/Casting.h"
96 #include "llvm/Support/CommandLine.h"
97 #include "llvm/Support/Compiler.h"
98 #include "llvm/Support/Debug.h"
99 #include "llvm/Support/DebugCounter.h"
100 #include "llvm/Support/ErrorHandling.h"
101 #include "llvm/Support/KnownBits.h"
102 #include "llvm/Support/raw_ostream.h"
103 #include "llvm/Transforms/InstCombine/InstCombine.h"
104 #include "llvm/Transforms/Utils/Local.h"
105 #include <algorithm>
106 #include <cassert>
107 #include <cstdint>
108 #include <memory>
109 #include <string>
110 #include <utility>
111 
112 #define DEBUG_TYPE "instcombine"
113 #include "llvm/Transforms/Utils/InstructionWorklist.h"
114 
115 using namespace llvm;
116 using namespace llvm::PatternMatch;
117 
118 STATISTIC(NumWorklistIterations,
119           "Number of instruction combining iterations performed");
120 
121 STATISTIC(NumCombined , "Number of insts combined");
122 STATISTIC(NumConstProp, "Number of constant folds");
123 STATISTIC(NumDeadInst , "Number of dead inst eliminated");
124 STATISTIC(NumSunkInst , "Number of instructions sunk");
125 STATISTIC(NumExpand,    "Number of expansions");
126 STATISTIC(NumFactor   , "Number of factorizations");
127 STATISTIC(NumReassoc  , "Number of reassociations");
128 DEBUG_COUNTER(VisitCounter, "instcombine-visit",
129               "Controls which instructions are visited");
130 
131 // FIXME: these limits eventually should be as low as 2.
132 static constexpr unsigned InstCombineDefaultMaxIterations = 1000;
133 #ifndef NDEBUG
134 static constexpr unsigned InstCombineDefaultInfiniteLoopThreshold = 100;
135 #else
136 static constexpr unsigned InstCombineDefaultInfiniteLoopThreshold = 1000;
137 #endif
138 
139 static cl::opt<bool>
140 EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"),
141                                               cl::init(true));
142 
143 static cl::opt<unsigned> LimitMaxIterations(
144     "instcombine-max-iterations",
145     cl::desc("Limit the maximum number of instruction combining iterations"),
146     cl::init(InstCombineDefaultMaxIterations));
147 
148 static cl::opt<unsigned> InfiniteLoopDetectionThreshold(
149     "instcombine-infinite-loop-threshold",
150     cl::desc("Number of instruction combining iterations considered an "
151              "infinite loop"),
152     cl::init(InstCombineDefaultInfiniteLoopThreshold), cl::Hidden);
153 
154 static cl::opt<unsigned>
155 MaxArraySize("instcombine-maxarray-size", cl::init(1024),
156              cl::desc("Maximum array size considered when doing a combine"));
157 
158 // FIXME: Remove this flag when it is no longer necessary to convert
159 // llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
160 // increases variable availability at the cost of accuracy. Variables that
161 // cannot be promoted by mem2reg or SROA will be described as living in memory
162 // for their entire lifetime. However, passes like DSE and instcombine can
163 // delete stores to the alloca, leading to misleading and inaccurate debug
164 // information. This flag can be removed when those passes are fixed.
165 static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
166                                                cl::Hidden, cl::init(true));
167 
168 Optional<Instruction *>
169 InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) {
170   // Handle target specific intrinsics
171   if (II.getCalledFunction()->isTargetIntrinsic()) {
172     return TTI.instCombineIntrinsic(*this, II);
173   }
174   return None;
175 }
176 
177 Optional<Value *> InstCombiner::targetSimplifyDemandedUseBitsIntrinsic(
178     IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
179     bool &KnownBitsComputed) {
180   // Handle target specific intrinsics
181   if (II.getCalledFunction()->isTargetIntrinsic()) {
182     return TTI.simplifyDemandedUseBitsIntrinsic(*this, II, DemandedMask, Known,
183                                                 KnownBitsComputed);
184   }
185   return None;
186 }
187 
188 Optional<Value *> InstCombiner::targetSimplifyDemandedVectorEltsIntrinsic(
189     IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2,
190     APInt &UndefElts3,
191     std::function<void(Instruction *, unsigned, APInt, APInt &)>
192         SimplifyAndSetOp) {
193   // Handle target specific intrinsics
194   if (II.getCalledFunction()->isTargetIntrinsic()) {
195     return TTI.simplifyDemandedVectorEltsIntrinsic(
196         *this, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
197         SimplifyAndSetOp);
198   }
199   return None;
200 }
201 
202 Value *InstCombinerImpl::EmitGEPOffset(User *GEP) {
203   return llvm::EmitGEPOffset(&Builder, DL, GEP);
204 }
205 
206 /// Legal integers and common types are considered desirable. This is used to
207 /// avoid creating instructions with types that may not be supported well by the
208 /// the backend.
209 /// NOTE: This treats i8, i16 and i32 specially because they are common
210 ///       types in frontend languages.
211 bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {
212   switch (BitWidth) {
213   case 8:
214   case 16:
215   case 32:
216     return true;
217   default:
218     return DL.isLegalInteger(BitWidth);
219   }
220 }
221 
222 /// Return true if it is desirable to convert an integer computation from a
223 /// given bit width to a new bit width.
224 /// We don't want to convert from a legal to an illegal type or from a smaller
225 /// to a larger illegal type. A width of '1' is always treated as a desirable
226 /// type because i1 is a fundamental type in IR, and there are many specialized
227 /// optimizations for i1 types. Common/desirable widths are equally treated as
228 /// legal to convert to, in order to open up more combining opportunities.
229 bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
230                                         unsigned ToWidth) const {
231   bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
232   bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
233 
234   // Convert to desirable widths even if they are not legal types.
235   // Only shrink types, to prevent infinite loops.
236   if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
237     return true;
238 
239   // If this is a legal integer from type, and the result would be an illegal
240   // type, don't do the transformation.
241   if (FromLegal && !ToLegal)
242     return false;
243 
244   // Otherwise, if both are illegal, do not increase the size of the result. We
245   // do allow things like i160 -> i64, but not i64 -> i160.
246   if (!FromLegal && !ToLegal && ToWidth > FromWidth)
247     return false;
248 
249   return true;
250 }
251 
252 /// Return true if it is desirable to convert a computation from 'From' to 'To'.
253 /// We don't want to convert from a legal to an illegal type or from a smaller
254 /// to a larger illegal type. i1 is always treated as a legal type because it is
255 /// a fundamental type in IR, and there are many specialized optimizations for
256 /// i1 types.
257 bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {
258   // TODO: This could be extended to allow vectors. Datalayout changes might be
259   // needed to properly support that.
260   if (!From->isIntegerTy() || !To->isIntegerTy())
261     return false;
262 
263   unsigned FromWidth = From->getPrimitiveSizeInBits();
264   unsigned ToWidth = To->getPrimitiveSizeInBits();
265   return shouldChangeType(FromWidth, ToWidth);
266 }
267 
268 // Return true, if No Signed Wrap should be maintained for I.
269 // The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
270 // where both B and C should be ConstantInts, results in a constant that does
271 // not overflow. This function only handles the Add and Sub opcodes. For
272 // all other opcodes, the function conservatively returns false.
273 static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
274   auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
275   if (!OBO || !OBO->hasNoSignedWrap())
276     return false;
277 
278   // We reason about Add and Sub Only.
279   Instruction::BinaryOps Opcode = I.getOpcode();
280   if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
281     return false;
282 
283   const APInt *BVal, *CVal;
284   if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
285     return false;
286 
287   bool Overflow = false;
288   if (Opcode == Instruction::Add)
289     (void)BVal->sadd_ov(*CVal, Overflow);
290   else
291     (void)BVal->ssub_ov(*CVal, Overflow);
292 
293   return !Overflow;
294 }
295 
296 static bool hasNoUnsignedWrap(BinaryOperator &I) {
297   auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
298   return OBO && OBO->hasNoUnsignedWrap();
299 }
300 
301 static bool hasNoSignedWrap(BinaryOperator &I) {
302   auto *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
303   return OBO && OBO->hasNoSignedWrap();
304 }
305 
306 /// Conservatively clears subclassOptionalData after a reassociation or
307 /// commutation. We preserve fast-math flags when applicable as they can be
308 /// preserved.
309 static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
310   FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
311   if (!FPMO) {
312     I.clearSubclassOptionalData();
313     return;
314   }
315 
316   FastMathFlags FMF = I.getFastMathFlags();
317   I.clearSubclassOptionalData();
318   I.setFastMathFlags(FMF);
319 }
320 
321 /// Combine constant operands of associative operations either before or after a
322 /// cast to eliminate one of the associative operations:
323 /// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
324 /// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
325 static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
326                                    InstCombinerImpl &IC) {
327   auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
328   if (!Cast || !Cast->hasOneUse())
329     return false;
330 
331   // TODO: Enhance logic for other casts and remove this check.
332   auto CastOpcode = Cast->getOpcode();
333   if (CastOpcode != Instruction::ZExt)
334     return false;
335 
336   // TODO: Enhance logic for other BinOps and remove this check.
337   if (!BinOp1->isBitwiseLogicOp())
338     return false;
339 
340   auto AssocOpcode = BinOp1->getOpcode();
341   auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
342   if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
343     return false;
344 
345   Constant *C1, *C2;
346   if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
347       !match(BinOp2->getOperand(1), m_Constant(C2)))
348     return false;
349 
350   // TODO: This assumes a zext cast.
351   // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
352   // to the destination type might lose bits.
353 
354   // Fold the constants together in the destination type:
355   // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
356   Type *DestTy = C1->getType();
357   Constant *CastC2 = ConstantExpr::getCast(CastOpcode, C2, DestTy);
358   Constant *FoldedC = ConstantExpr::get(AssocOpcode, C1, CastC2);
359   IC.replaceOperand(*Cast, 0, BinOp2->getOperand(0));
360   IC.replaceOperand(*BinOp1, 1, FoldedC);
361   return true;
362 }
363 
364 // Simplifies IntToPtr/PtrToInt RoundTrip Cast To BitCast.
365 // inttoptr ( ptrtoint (x) ) --> x
366 Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {
367   auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
368   if (IntToPtr && DL.getPointerTypeSizeInBits(IntToPtr->getDestTy()) ==
369                       DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
370     auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
371     Type *CastTy = IntToPtr->getDestTy();
372     if (PtrToInt &&
373         CastTy->getPointerAddressSpace() ==
374             PtrToInt->getSrcTy()->getPointerAddressSpace() &&
375         DL.getPointerTypeSizeInBits(PtrToInt->getSrcTy()) ==
376             DL.getTypeSizeInBits(PtrToInt->getDestTy())) {
377       return CastInst::CreateBitOrPointerCast(PtrToInt->getOperand(0), CastTy,
378                                               "", PtrToInt);
379     }
380   }
381   return nullptr;
382 }
383 
384 /// This performs a few simplifications for operators that are associative or
385 /// commutative:
386 ///
387 ///  Commutative operators:
388 ///
389 ///  1. Order operands such that they are listed from right (least complex) to
390 ///     left (most complex).  This puts constants before unary operators before
391 ///     binary operators.
392 ///
393 ///  Associative operators:
394 ///
395 ///  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
396 ///  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
397 ///
398 ///  Associative and commutative operators:
399 ///
400 ///  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
401 ///  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
402 ///  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
403 ///     if C1 and C2 are constants.
404 bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
405   Instruction::BinaryOps Opcode = I.getOpcode();
406   bool Changed = false;
407 
408   do {
409     // Order operands such that they are listed from right (least complex) to
410     // left (most complex).  This puts constants before unary operators before
411     // binary operators.
412     if (I.isCommutative() && getComplexity(I.getOperand(0)) <
413         getComplexity(I.getOperand(1)))
414       Changed = !I.swapOperands();
415 
416     BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
417     BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
418 
419     if (I.isAssociative()) {
420       // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
421       if (Op0 && Op0->getOpcode() == Opcode) {
422         Value *A = Op0->getOperand(0);
423         Value *B = Op0->getOperand(1);
424         Value *C = I.getOperand(1);
425 
426         // Does "B op C" simplify?
427         if (Value *V = SimplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
428           // It simplifies to V.  Form "A op V".
429           replaceOperand(I, 0, A);
430           replaceOperand(I, 1, V);
431           bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0);
432           bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0);
433 
434           // Conservatively clear all optional flags since they may not be
435           // preserved by the reassociation. Reset nsw/nuw based on the above
436           // analysis.
437           ClearSubclassDataAfterReassociation(I);
438 
439           // Note: this is only valid because SimplifyBinOp doesn't look at
440           // the operands to Op0.
441           if (IsNUW)
442             I.setHasNoUnsignedWrap(true);
443 
444           if (IsNSW)
445             I.setHasNoSignedWrap(true);
446 
447           Changed = true;
448           ++NumReassoc;
449           continue;
450         }
451       }
452 
453       // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
454       if (Op1 && Op1->getOpcode() == Opcode) {
455         Value *A = I.getOperand(0);
456         Value *B = Op1->getOperand(0);
457         Value *C = Op1->getOperand(1);
458 
459         // Does "A op B" simplify?
460         if (Value *V = SimplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
461           // It simplifies to V.  Form "V op C".
462           replaceOperand(I, 0, V);
463           replaceOperand(I, 1, C);
464           // Conservatively clear the optional flags, since they may not be
465           // preserved by the reassociation.
466           ClearSubclassDataAfterReassociation(I);
467           Changed = true;
468           ++NumReassoc;
469           continue;
470         }
471       }
472     }
473 
474     if (I.isAssociative() && I.isCommutative()) {
475       if (simplifyAssocCastAssoc(&I, *this)) {
476         Changed = true;
477         ++NumReassoc;
478         continue;
479       }
480 
481       // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
482       if (Op0 && Op0->getOpcode() == Opcode) {
483         Value *A = Op0->getOperand(0);
484         Value *B = Op0->getOperand(1);
485         Value *C = I.getOperand(1);
486 
487         // Does "C op A" simplify?
488         if (Value *V = SimplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
489           // It simplifies to V.  Form "V op B".
490           replaceOperand(I, 0, V);
491           replaceOperand(I, 1, B);
492           // Conservatively clear the optional flags, since they may not be
493           // preserved by the reassociation.
494           ClearSubclassDataAfterReassociation(I);
495           Changed = true;
496           ++NumReassoc;
497           continue;
498         }
499       }
500 
501       // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
502       if (Op1 && Op1->getOpcode() == Opcode) {
503         Value *A = I.getOperand(0);
504         Value *B = Op1->getOperand(0);
505         Value *C = Op1->getOperand(1);
506 
507         // Does "C op A" simplify?
508         if (Value *V = SimplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
509           // It simplifies to V.  Form "B op V".
510           replaceOperand(I, 0, B);
511           replaceOperand(I, 1, V);
512           // Conservatively clear the optional flags, since they may not be
513           // preserved by the reassociation.
514           ClearSubclassDataAfterReassociation(I);
515           Changed = true;
516           ++NumReassoc;
517           continue;
518         }
519       }
520 
521       // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
522       // if C1 and C2 are constants.
523       Value *A, *B;
524       Constant *C1, *C2;
525       if (Op0 && Op1 &&
526           Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
527           match(Op0, m_OneUse(m_BinOp(m_Value(A), m_Constant(C1)))) &&
528           match(Op1, m_OneUse(m_BinOp(m_Value(B), m_Constant(C2))))) {
529         bool IsNUW = hasNoUnsignedWrap(I) &&
530            hasNoUnsignedWrap(*Op0) &&
531            hasNoUnsignedWrap(*Op1);
532          BinaryOperator *NewBO = (IsNUW && Opcode == Instruction::Add) ?
533            BinaryOperator::CreateNUW(Opcode, A, B) :
534            BinaryOperator::Create(Opcode, A, B);
535 
536          if (isa<FPMathOperator>(NewBO)) {
537           FastMathFlags Flags = I.getFastMathFlags();
538           Flags &= Op0->getFastMathFlags();
539           Flags &= Op1->getFastMathFlags();
540           NewBO->setFastMathFlags(Flags);
541         }
542         InsertNewInstWith(NewBO, I);
543         NewBO->takeName(Op1);
544         replaceOperand(I, 0, NewBO);
545         replaceOperand(I, 1, ConstantExpr::get(Opcode, C1, C2));
546         // Conservatively clear the optional flags, since they may not be
547         // preserved by the reassociation.
548         ClearSubclassDataAfterReassociation(I);
549         if (IsNUW)
550           I.setHasNoUnsignedWrap(true);
551 
552         Changed = true;
553         continue;
554       }
555     }
556 
557     // No further simplifications.
558     return Changed;
559   } while (true);
560 }
561 
562 /// Return whether "X LOp (Y ROp Z)" is always equal to
563 /// "(X LOp Y) ROp (X LOp Z)".
564 static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
565                                      Instruction::BinaryOps ROp) {
566   // X & (Y | Z) <--> (X & Y) | (X & Z)
567   // X & (Y ^ Z) <--> (X & Y) ^ (X & Z)
568   if (LOp == Instruction::And)
569     return ROp == Instruction::Or || ROp == Instruction::Xor;
570 
571   // X | (Y & Z) <--> (X | Y) & (X | Z)
572   if (LOp == Instruction::Or)
573     return ROp == Instruction::And;
574 
575   // X * (Y + Z) <--> (X * Y) + (X * Z)
576   // X * (Y - Z) <--> (X * Y) - (X * Z)
577   if (LOp == Instruction::Mul)
578     return ROp == Instruction::Add || ROp == Instruction::Sub;
579 
580   return false;
581 }
582 
583 /// Return whether "(X LOp Y) ROp Z" is always equal to
584 /// "(X ROp Z) LOp (Y ROp Z)".
585 static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
586                                      Instruction::BinaryOps ROp) {
587   if (Instruction::isCommutative(ROp))
588     return leftDistributesOverRight(ROp, LOp);
589 
590   // (X {&|^} Y) >> Z <--> (X >> Z) {&|^} (Y >> Z) for all shifts.
591   return Instruction::isBitwiseLogicOp(LOp) && Instruction::isShift(ROp);
592 
593   // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
594   // but this requires knowing that the addition does not overflow and other
595   // such subtleties.
596 }
597 
598 /// This function returns identity value for given opcode, which can be used to
599 /// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
600 static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
601   if (isa<Constant>(V))
602     return nullptr;
603 
604   return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
605 }
606 
607 /// This function predicates factorization using distributive laws. By default,
608 /// it just returns the 'Op' inputs. But for special-cases like
609 /// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
610 /// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
611 /// allow more factorization opportunities.
612 static Instruction::BinaryOps
613 getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
614                           Value *&LHS, Value *&RHS) {
615   assert(Op && "Expected a binary operator");
616   LHS = Op->getOperand(0);
617   RHS = Op->getOperand(1);
618   if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
619     Constant *C;
620     if (match(Op, m_Shl(m_Value(), m_Constant(C)))) {
621       // X << C --> X * (1 << C)
622       RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), C);
623       return Instruction::Mul;
624     }
625     // TODO: We can add other conversions e.g. shr => div etc.
626   }
627   return Op->getOpcode();
628 }
629 
630 /// This tries to simplify binary operations by factorizing out common terms
631 /// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
632 Value *InstCombinerImpl::tryFactorization(BinaryOperator &I,
633                                           Instruction::BinaryOps InnerOpcode,
634                                           Value *A, Value *B, Value *C,
635                                           Value *D) {
636   assert(A && B && C && D && "All values must be provided");
637 
638   Value *V = nullptr;
639   Value *SimplifiedInst = nullptr;
640   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
641   Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
642 
643   // Does "X op' Y" always equal "Y op' X"?
644   bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
645 
646   // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
647   if (leftDistributesOverRight(InnerOpcode, TopLevelOpcode))
648     // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
649     // commutative case, "(A op' B) op (C op' A)"?
650     if (A == C || (InnerCommutative && A == D)) {
651       if (A != C)
652         std::swap(C, D);
653       // Consider forming "A op' (B op D)".
654       // If "B op D" simplifies then it can be formed with no cost.
655       V = SimplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
656       // If "B op D" doesn't simplify then only go on if both of the existing
657       // operations "A op' B" and "C op' D" will be zapped as no longer used.
658       if (!V && LHS->hasOneUse() && RHS->hasOneUse())
659         V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
660       if (V) {
661         SimplifiedInst = Builder.CreateBinOp(InnerOpcode, A, V);
662       }
663     }
664 
665   // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
666   if (!SimplifiedInst && rightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
667     // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
668     // commutative case, "(A op' B) op (B op' D)"?
669     if (B == D || (InnerCommutative && B == C)) {
670       if (B != D)
671         std::swap(C, D);
672       // Consider forming "(A op C) op' B".
673       // If "A op C" simplifies then it can be formed with no cost.
674       V = SimplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
675 
676       // If "A op C" doesn't simplify then only go on if both of the existing
677       // operations "A op' B" and "C op' D" will be zapped as no longer used.
678       if (!V && LHS->hasOneUse() && RHS->hasOneUse())
679         V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
680       if (V) {
681         SimplifiedInst = Builder.CreateBinOp(InnerOpcode, V, B);
682       }
683     }
684 
685   if (SimplifiedInst) {
686     ++NumFactor;
687     SimplifiedInst->takeName(&I);
688 
689     // Check if we can add NSW/NUW flags to SimplifiedInst. If so, set them.
690     if (BinaryOperator *BO = dyn_cast<BinaryOperator>(SimplifiedInst)) {
691       if (isa<OverflowingBinaryOperator>(SimplifiedInst)) {
692         bool HasNSW = false;
693         bool HasNUW = false;
694         if (isa<OverflowingBinaryOperator>(&I)) {
695           HasNSW = I.hasNoSignedWrap();
696           HasNUW = I.hasNoUnsignedWrap();
697         }
698 
699         if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS)) {
700           HasNSW &= LOBO->hasNoSignedWrap();
701           HasNUW &= LOBO->hasNoUnsignedWrap();
702         }
703 
704         if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS)) {
705           HasNSW &= ROBO->hasNoSignedWrap();
706           HasNUW &= ROBO->hasNoUnsignedWrap();
707         }
708 
709         if (TopLevelOpcode == Instruction::Add &&
710             InnerOpcode == Instruction::Mul) {
711           // We can propagate 'nsw' if we know that
712           //  %Y = mul nsw i16 %X, C
713           //  %Z = add nsw i16 %Y, %X
714           // =>
715           //  %Z = mul nsw i16 %X, C+1
716           //
717           // iff C+1 isn't INT_MIN
718           const APInt *CInt;
719           if (match(V, m_APInt(CInt))) {
720             if (!CInt->isMinSignedValue())
721               BO->setHasNoSignedWrap(HasNSW);
722           }
723 
724           // nuw can be propagated with any constant or nuw value.
725           BO->setHasNoUnsignedWrap(HasNUW);
726         }
727       }
728     }
729   }
730   return SimplifiedInst;
731 }
732 
733 /// This tries to simplify binary operations which some other binary operation
734 /// distributes over either by factorizing out common terms
735 /// (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
736 /// simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win).
737 /// Returns the simplified value, or null if it didn't simplify.
738 Value *InstCombinerImpl::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
739   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
740   BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
741   BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
742   Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
743 
744   {
745     // Factorization.
746     Value *A, *B, *C, *D;
747     Instruction::BinaryOps LHSOpcode, RHSOpcode;
748     if (Op0)
749       LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B);
750     if (Op1)
751       RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D);
752 
753     // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
754     // a common term.
755     if (Op0 && Op1 && LHSOpcode == RHSOpcode)
756       if (Value *V = tryFactorization(I, LHSOpcode, A, B, C, D))
757         return V;
758 
759     // The instruction has the form "(A op' B) op (C)".  Try to factorize common
760     // term.
761     if (Op0)
762       if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
763         if (Value *V = tryFactorization(I, LHSOpcode, A, B, RHS, Ident))
764           return V;
765 
766     // The instruction has the form "(B) op (C op' D)".  Try to factorize common
767     // term.
768     if (Op1)
769       if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
770         if (Value *V = tryFactorization(I, RHSOpcode, LHS, Ident, C, D))
771           return V;
772   }
773 
774   // Expansion.
775   if (Op0 && rightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
776     // The instruction has the form "(A op' B) op C".  See if expanding it out
777     // to "(A op C) op' (B op C)" results in simplifications.
778     Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
779     Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
780 
781     // Disable the use of undef because it's not safe to distribute undef.
782     auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
783     Value *L = SimplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
784     Value *R = SimplifyBinOp(TopLevelOpcode, B, C, SQDistributive);
785 
786     // Do "A op C" and "B op C" both simplify?
787     if (L && R) {
788       // They do! Return "L op' R".
789       ++NumExpand;
790       C = Builder.CreateBinOp(InnerOpcode, L, R);
791       C->takeName(&I);
792       return C;
793     }
794 
795     // Does "A op C" simplify to the identity value for the inner opcode?
796     if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
797       // They do! Return "B op C".
798       ++NumExpand;
799       C = Builder.CreateBinOp(TopLevelOpcode, B, C);
800       C->takeName(&I);
801       return C;
802     }
803 
804     // Does "B op C" simplify to the identity value for the inner opcode?
805     if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
806       // They do! Return "A op C".
807       ++NumExpand;
808       C = Builder.CreateBinOp(TopLevelOpcode, A, C);
809       C->takeName(&I);
810       return C;
811     }
812   }
813 
814   if (Op1 && leftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
815     // The instruction has the form "A op (B op' C)".  See if expanding it out
816     // to "(A op B) op' (A op C)" results in simplifications.
817     Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
818     Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
819 
820     // Disable the use of undef because it's not safe to distribute undef.
821     auto SQDistributive = SQ.getWithInstruction(&I).getWithoutUndef();
822     Value *L = SimplifyBinOp(TopLevelOpcode, A, B, SQDistributive);
823     Value *R = SimplifyBinOp(TopLevelOpcode, A, C, SQDistributive);
824 
825     // Do "A op B" and "A op C" both simplify?
826     if (L && R) {
827       // They do! Return "L op' R".
828       ++NumExpand;
829       A = Builder.CreateBinOp(InnerOpcode, L, R);
830       A->takeName(&I);
831       return A;
832     }
833 
834     // Does "A op B" simplify to the identity value for the inner opcode?
835     if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
836       // They do! Return "A op C".
837       ++NumExpand;
838       A = Builder.CreateBinOp(TopLevelOpcode, A, C);
839       A->takeName(&I);
840       return A;
841     }
842 
843     // Does "A op C" simplify to the identity value for the inner opcode?
844     if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
845       // They do! Return "A op B".
846       ++NumExpand;
847       A = Builder.CreateBinOp(TopLevelOpcode, A, B);
848       A->takeName(&I);
849       return A;
850     }
851   }
852 
853   return SimplifySelectsFeedingBinaryOp(I, LHS, RHS);
854 }
855 
856 Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
857                                                         Value *LHS,
858                                                         Value *RHS) {
859   Value *A, *B, *C, *D, *E, *F;
860   bool LHSIsSelect = match(LHS, m_Select(m_Value(A), m_Value(B), m_Value(C)));
861   bool RHSIsSelect = match(RHS, m_Select(m_Value(D), m_Value(E), m_Value(F)));
862   if (!LHSIsSelect && !RHSIsSelect)
863     return nullptr;
864 
865   FastMathFlags FMF;
866   BuilderTy::FastMathFlagGuard Guard(Builder);
867   if (isa<FPMathOperator>(&I)) {
868     FMF = I.getFastMathFlags();
869     Builder.setFastMathFlags(FMF);
870   }
871 
872   Instruction::BinaryOps Opcode = I.getOpcode();
873   SimplifyQuery Q = SQ.getWithInstruction(&I);
874 
875   Value *Cond, *True = nullptr, *False = nullptr;
876   if (LHSIsSelect && RHSIsSelect && A == D) {
877     // (A ? B : C) op (A ? E : F) -> A ? (B op E) : (C op F)
878     Cond = A;
879     True = SimplifyBinOp(Opcode, B, E, FMF, Q);
880     False = SimplifyBinOp(Opcode, C, F, FMF, Q);
881 
882     if (LHS->hasOneUse() && RHS->hasOneUse()) {
883       if (False && !True)
884         True = Builder.CreateBinOp(Opcode, B, E);
885       else if (True && !False)
886         False = Builder.CreateBinOp(Opcode, C, F);
887     }
888   } else if (LHSIsSelect && LHS->hasOneUse()) {
889     // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
890     Cond = A;
891     True = SimplifyBinOp(Opcode, B, RHS, FMF, Q);
892     False = SimplifyBinOp(Opcode, C, RHS, FMF, Q);
893   } else if (RHSIsSelect && RHS->hasOneUse()) {
894     // X op (D ? E : F) -> D ? (X op E) : (X op F)
895     Cond = D;
896     True = SimplifyBinOp(Opcode, LHS, E, FMF, Q);
897     False = SimplifyBinOp(Opcode, LHS, F, FMF, Q);
898   }
899 
900   if (!True || !False)
901     return nullptr;
902 
903   Value *SI = Builder.CreateSelect(Cond, True, False);
904   SI->takeName(&I);
905   return SI;
906 }
907 
908 /// Freely adapt every user of V as-if V was changed to !V.
909 /// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
910 void InstCombinerImpl::freelyInvertAllUsersOf(Value *I) {
911   for (User *U : I->users()) {
912     switch (cast<Instruction>(U)->getOpcode()) {
913     case Instruction::Select: {
914       auto *SI = cast<SelectInst>(U);
915       SI->swapValues();
916       SI->swapProfMetadata();
917       break;
918     }
919     case Instruction::Br:
920       cast<BranchInst>(U)->swapSuccessors(); // swaps prof metadata too
921       break;
922     case Instruction::Xor:
923       replaceInstUsesWith(cast<Instruction>(*U), I);
924       break;
925     default:
926       llvm_unreachable("Got unexpected user - out of sync with "
927                        "canFreelyInvertAllUsersOf() ?");
928     }
929   }
930 }
931 
932 /// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
933 /// constant zero (which is the 'negate' form).
934 Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
935   Value *NegV;
936   if (match(V, m_Neg(m_Value(NegV))))
937     return NegV;
938 
939   // Constants can be considered to be negated values if they can be folded.
940   if (ConstantInt *C = dyn_cast<ConstantInt>(V))
941     return ConstantExpr::getNeg(C);
942 
943   if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
944     if (C->getType()->getElementType()->isIntegerTy())
945       return ConstantExpr::getNeg(C);
946 
947   if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
948     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
949       Constant *Elt = CV->getAggregateElement(i);
950       if (!Elt)
951         return nullptr;
952 
953       if (isa<UndefValue>(Elt))
954         continue;
955 
956       if (!isa<ConstantInt>(Elt))
957         return nullptr;
958     }
959     return ConstantExpr::getNeg(CV);
960   }
961 
962   // Negate integer vector splats.
963   if (auto *CV = dyn_cast<Constant>(V))
964     if (CV->getType()->isVectorTy() &&
965         CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
966       return ConstantExpr::getNeg(CV);
967 
968   return nullptr;
969 }
970 
971 /// A binop with a constant operand and a sign-extended boolean operand may be
972 /// converted into a select of constants by applying the binary operation to
973 /// the constant with the two possible values of the extended boolean (0 or -1).
974 Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {
975   // TODO: Handle non-commutative binop (constant is operand 0).
976   // TODO: Handle zext.
977   // TODO: Peek through 'not' of cast.
978   Value *BO0 = BO.getOperand(0);
979   Value *BO1 = BO.getOperand(1);
980   Value *X;
981   Constant *C;
982   if (!match(BO0, m_SExt(m_Value(X))) || !match(BO1, m_ImmConstant(C)) ||
983       !X->getType()->isIntOrIntVectorTy(1))
984     return nullptr;
985 
986   // bo (sext i1 X), C --> select X, (bo -1, C), (bo 0, C)
987   Constant *Ones = ConstantInt::getAllOnesValue(BO.getType());
988   Constant *Zero = ConstantInt::getNullValue(BO.getType());
989   Constant *TVal = ConstantExpr::get(BO.getOpcode(), Ones, C);
990   Constant *FVal = ConstantExpr::get(BO.getOpcode(), Zero, C);
991   return SelectInst::Create(X, TVal, FVal);
992 }
993 
994 static Value *foldOperationIntoSelectOperand(Instruction &I, Value *SO,
995                                              InstCombiner::BuilderTy &Builder) {
996   if (auto *Cast = dyn_cast<CastInst>(&I))
997     return Builder.CreateCast(Cast->getOpcode(), SO, I.getType());
998 
999   if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
1000     assert(canConstantFoldCallTo(II, cast<Function>(II->getCalledOperand())) &&
1001            "Expected constant-foldable intrinsic");
1002     Intrinsic::ID IID = II->getIntrinsicID();
1003     if (II->arg_size() == 1)
1004       return Builder.CreateUnaryIntrinsic(IID, SO);
1005 
1006     // This works for real binary ops like min/max (where we always expect the
1007     // constant operand to be canonicalized as op1) and unary ops with a bonus
1008     // constant argument like ctlz/cttz.
1009     // TODO: Handle non-commutative binary intrinsics as below for binops.
1010     assert(II->arg_size() == 2 && "Expected binary intrinsic");
1011     assert(isa<Constant>(II->getArgOperand(1)) && "Expected constant operand");
1012     return Builder.CreateBinaryIntrinsic(IID, SO, II->getArgOperand(1));
1013   }
1014 
1015   assert(I.isBinaryOp() && "Unexpected opcode for select folding");
1016 
1017   // Figure out if the constant is the left or the right argument.
1018   bool ConstIsRHS = isa<Constant>(I.getOperand(1));
1019   Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));
1020 
1021   if (auto *SOC = dyn_cast<Constant>(SO)) {
1022     if (ConstIsRHS)
1023       return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
1024     return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
1025   }
1026 
1027   Value *Op0 = SO, *Op1 = ConstOperand;
1028   if (!ConstIsRHS)
1029     std::swap(Op0, Op1);
1030 
1031   Value *NewBO = Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), Op0,
1032                                      Op1, SO->getName() + ".op");
1033   if (auto *NewBOI = dyn_cast<Instruction>(NewBO))
1034     NewBOI->copyIRFlags(&I);
1035   return NewBO;
1036 }
1037 
1038 Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op,
1039                                                 SelectInst *SI) {
1040   // Don't modify shared select instructions.
1041   if (!SI->hasOneUse())
1042     return nullptr;
1043 
1044   Value *TV = SI->getTrueValue();
1045   Value *FV = SI->getFalseValue();
1046   if (!(isa<Constant>(TV) || isa<Constant>(FV)))
1047     return nullptr;
1048 
1049   // Bool selects with constant operands can be folded to logical ops.
1050   if (SI->getType()->isIntOrIntVectorTy(1))
1051     return nullptr;
1052 
1053   // If it's a bitcast involving vectors, make sure it has the same number of
1054   // elements on both sides.
1055   if (auto *BC = dyn_cast<BitCastInst>(&Op)) {
1056     VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
1057     VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
1058 
1059     // Verify that either both or neither are vectors.
1060     if ((SrcTy == nullptr) != (DestTy == nullptr))
1061       return nullptr;
1062 
1063     // If vectors, verify that they have the same number of elements.
1064     if (SrcTy && SrcTy->getElementCount() != DestTy->getElementCount())
1065       return nullptr;
1066   }
1067 
1068   // Test if a CmpInst instruction is used exclusively by a select as
1069   // part of a minimum or maximum operation. If so, refrain from doing
1070   // any other folding. This helps out other analyses which understand
1071   // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
1072   // and CodeGen. And in this case, at least one of the comparison
1073   // operands has at least one user besides the compare (the select),
1074   // which would often largely negate the benefit of folding anyway.
1075   if (auto *CI = dyn_cast<CmpInst>(SI->getCondition())) {
1076     if (CI->hasOneUse()) {
1077       Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1078 
1079       // FIXME: This is a hack to avoid infinite looping with min/max patterns.
1080       //        We have to ensure that vector constants that only differ with
1081       //        undef elements are treated as equivalent.
1082       auto areLooselyEqual = [](Value *A, Value *B) {
1083         if (A == B)
1084           return true;
1085 
1086         // Test for vector constants.
1087         Constant *ConstA, *ConstB;
1088         if (!match(A, m_Constant(ConstA)) || !match(B, m_Constant(ConstB)))
1089           return false;
1090 
1091         // TODO: Deal with FP constants?
1092         if (!A->getType()->isIntOrIntVectorTy() || A->getType() != B->getType())
1093           return false;
1094 
1095         // Compare for equality including undefs as equal.
1096         auto *Cmp = ConstantExpr::getCompare(ICmpInst::ICMP_EQ, ConstA, ConstB);
1097         const APInt *C;
1098         return match(Cmp, m_APIntAllowUndef(C)) && C->isOne();
1099       };
1100 
1101       if ((areLooselyEqual(TV, Op0) && areLooselyEqual(FV, Op1)) ||
1102           (areLooselyEqual(FV, Op0) && areLooselyEqual(TV, Op1)))
1103         return nullptr;
1104     }
1105   }
1106 
1107   Value *NewTV = foldOperationIntoSelectOperand(Op, TV, Builder);
1108   Value *NewFV = foldOperationIntoSelectOperand(Op, FV, Builder);
1109   return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
1110 }
1111 
1112 static Value *foldOperationIntoPhiValue(BinaryOperator *I, Value *InV,
1113                                         InstCombiner::BuilderTy &Builder) {
1114   bool ConstIsRHS = isa<Constant>(I->getOperand(1));
1115   Constant *C = cast<Constant>(I->getOperand(ConstIsRHS));
1116 
1117   if (auto *InC = dyn_cast<Constant>(InV)) {
1118     if (ConstIsRHS)
1119       return ConstantExpr::get(I->getOpcode(), InC, C);
1120     return ConstantExpr::get(I->getOpcode(), C, InC);
1121   }
1122 
1123   Value *Op0 = InV, *Op1 = C;
1124   if (!ConstIsRHS)
1125     std::swap(Op0, Op1);
1126 
1127   Value *RI = Builder.CreateBinOp(I->getOpcode(), Op0, Op1, "phi.bo");
1128   auto *FPInst = dyn_cast<Instruction>(RI);
1129   if (FPInst && isa<FPMathOperator>(FPInst))
1130     FPInst->copyFastMathFlags(I);
1131   return RI;
1132 }
1133 
1134 Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN) {
1135   unsigned NumPHIValues = PN->getNumIncomingValues();
1136   if (NumPHIValues == 0)
1137     return nullptr;
1138 
1139   // We normally only transform phis with a single use.  However, if a PHI has
1140   // multiple uses and they are all the same operation, we can fold *all* of the
1141   // uses into the PHI.
1142   if (!PN->hasOneUse()) {
1143     // Walk the use list for the instruction, comparing them to I.
1144     for (User *U : PN->users()) {
1145       Instruction *UI = cast<Instruction>(U);
1146       if (UI != &I && !I.isIdenticalTo(UI))
1147         return nullptr;
1148     }
1149     // Otherwise, we can replace *all* users with the new PHI we form.
1150   }
1151 
1152   // Check to see if all of the operands of the PHI are simple constants
1153   // (constantint/constantfp/undef).  If there is one non-constant value,
1154   // remember the BB it is in.  If there is more than one or if *it* is a PHI,
1155   // bail out.  We don't do arbitrary constant expressions here because moving
1156   // their computation can be expensive without a cost model.
1157   BasicBlock *NonConstBB = nullptr;
1158   for (unsigned i = 0; i != NumPHIValues; ++i) {
1159     Value *InVal = PN->getIncomingValue(i);
1160     // For non-freeze, require constant operand
1161     // For freeze, require non-undef, non-poison operand
1162     if (!isa<FreezeInst>(I) && match(InVal, m_ImmConstant()))
1163       continue;
1164     if (isa<FreezeInst>(I) && isGuaranteedNotToBeUndefOrPoison(InVal))
1165       continue;
1166 
1167     if (isa<PHINode>(InVal)) return nullptr;  // Itself a phi.
1168     if (NonConstBB) return nullptr;  // More than one non-const value.
1169 
1170     NonConstBB = PN->getIncomingBlock(i);
1171 
1172     // If the InVal is an invoke at the end of the pred block, then we can't
1173     // insert a computation after it without breaking the edge.
1174     if (isa<InvokeInst>(InVal))
1175       if (cast<Instruction>(InVal)->getParent() == NonConstBB)
1176         return nullptr;
1177 
1178     // If the incoming non-constant value is in I's block, we will remove one
1179     // instruction, but insert another equivalent one, leading to infinite
1180     // instcombine.
1181     if (isPotentiallyReachable(I.getParent(), NonConstBB, nullptr, &DT, LI))
1182       return nullptr;
1183   }
1184 
1185   // If there is exactly one non-constant value, we can insert a copy of the
1186   // operation in that block.  However, if this is a critical edge, we would be
1187   // inserting the computation on some other paths (e.g. inside a loop).  Only
1188   // do this if the pred block is unconditionally branching into the phi block.
1189   // Also, make sure that the pred block is not dead code.
1190   if (NonConstBB != nullptr) {
1191     BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
1192     if (!BI || !BI->isUnconditional() || !DT.isReachableFromEntry(NonConstBB))
1193       return nullptr;
1194   }
1195 
1196   // Okay, we can do the transformation: create the new PHI node.
1197   PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
1198   InsertNewInstBefore(NewPN, *PN);
1199   NewPN->takeName(PN);
1200 
1201   // If we are going to have to insert a new computation, do so right before the
1202   // predecessor's terminator.
1203   if (NonConstBB)
1204     Builder.SetInsertPoint(NonConstBB->getTerminator());
1205 
1206   // Next, add all of the operands to the PHI.
1207   if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
1208     // We only currently try to fold the condition of a select when it is a phi,
1209     // not the true/false values.
1210     Value *TrueV = SI->getTrueValue();
1211     Value *FalseV = SI->getFalseValue();
1212     BasicBlock *PhiTransBB = PN->getParent();
1213     for (unsigned i = 0; i != NumPHIValues; ++i) {
1214       BasicBlock *ThisBB = PN->getIncomingBlock(i);
1215       Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
1216       Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
1217       Value *InV = nullptr;
1218       // Beware of ConstantExpr:  it may eventually evaluate to getNullValue,
1219       // even if currently isNullValue gives false.
1220       Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i));
1221       // For vector constants, we cannot use isNullValue to fold into
1222       // FalseVInPred versus TrueVInPred. When we have individual nonzero
1223       // elements in the vector, we will incorrectly fold InC to
1224       // `TrueVInPred`.
1225       if (InC && isa<ConstantInt>(InC))
1226         InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
1227       else {
1228         // Generate the select in the same block as PN's current incoming block.
1229         // Note: ThisBB need not be the NonConstBB because vector constants
1230         // which are constants by definition are handled here.
1231         // FIXME: This can lead to an increase in IR generation because we might
1232         // generate selects for vector constant phi operand, that could not be
1233         // folded to TrueVInPred or FalseVInPred as done for ConstantInt. For
1234         // non-vector phis, this transformation was always profitable because
1235         // the select would be generated exactly once in the NonConstBB.
1236         Builder.SetInsertPoint(ThisBB->getTerminator());
1237         InV = Builder.CreateSelect(PN->getIncomingValue(i), TrueVInPred,
1238                                    FalseVInPred, "phi.sel");
1239       }
1240       NewPN->addIncoming(InV, ThisBB);
1241     }
1242   } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
1243     Constant *C = cast<Constant>(I.getOperand(1));
1244     for (unsigned i = 0; i != NumPHIValues; ++i) {
1245       Value *InV = nullptr;
1246       if (auto *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
1247         InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
1248       else
1249         InV = Builder.CreateCmp(CI->getPredicate(), PN->getIncomingValue(i),
1250                                 C, "phi.cmp");
1251       NewPN->addIncoming(InV, PN->getIncomingBlock(i));
1252     }
1253   } else if (auto *BO = dyn_cast<BinaryOperator>(&I)) {
1254     for (unsigned i = 0; i != NumPHIValues; ++i) {
1255       Value *InV = foldOperationIntoPhiValue(BO, PN->getIncomingValue(i),
1256                                              Builder);
1257       NewPN->addIncoming(InV, PN->getIncomingBlock(i));
1258     }
1259   } else if (isa<FreezeInst>(&I)) {
1260     for (unsigned i = 0; i != NumPHIValues; ++i) {
1261       Value *InV;
1262       if (NonConstBB == PN->getIncomingBlock(i))
1263         InV = Builder.CreateFreeze(PN->getIncomingValue(i), "phi.fr");
1264       else
1265         InV = PN->getIncomingValue(i);
1266       NewPN->addIncoming(InV, PN->getIncomingBlock(i));
1267     }
1268   } else {
1269     CastInst *CI = cast<CastInst>(&I);
1270     Type *RetTy = CI->getType();
1271     for (unsigned i = 0; i != NumPHIValues; ++i) {
1272       Value *InV;
1273       if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
1274         InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
1275       else
1276         InV = Builder.CreateCast(CI->getOpcode(), PN->getIncomingValue(i),
1277                                  I.getType(), "phi.cast");
1278       NewPN->addIncoming(InV, PN->getIncomingBlock(i));
1279     }
1280   }
1281 
1282   for (User *U : make_early_inc_range(PN->users())) {
1283     Instruction *User = cast<Instruction>(U);
1284     if (User == &I) continue;
1285     replaceInstUsesWith(*User, NewPN);
1286     eraseInstFromFunction(*User);
1287   }
1288   return replaceInstUsesWith(I, NewPN);
1289 }
1290 
1291 Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {
1292   // TODO: This should be similar to the incoming values check in foldOpIntoPhi:
1293   //       we are guarding against replicating the binop in >1 predecessor.
1294   //       This could miss matching a phi with 2 constant incoming values.
1295   auto *Phi0 = dyn_cast<PHINode>(BO.getOperand(0));
1296   auto *Phi1 = dyn_cast<PHINode>(BO.getOperand(1));
1297   if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
1298       Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
1299     return nullptr;
1300 
1301   // TODO: Remove the restriction for binop being in the same block as the phis.
1302   if (BO.getParent() != Phi0->getParent() ||
1303       BO.getParent() != Phi1->getParent())
1304     return nullptr;
1305 
1306   // Match a pair of incoming constants for one of the predecessor blocks.
1307   BasicBlock *ConstBB, *OtherBB;
1308   Constant *C0, *C1;
1309   if (match(Phi0->getIncomingValue(0), m_ImmConstant(C0))) {
1310     ConstBB = Phi0->getIncomingBlock(0);
1311     OtherBB = Phi0->getIncomingBlock(1);
1312   } else if (match(Phi0->getIncomingValue(1), m_ImmConstant(C0))) {
1313     ConstBB = Phi0->getIncomingBlock(1);
1314     OtherBB = Phi0->getIncomingBlock(0);
1315   } else {
1316     return nullptr;
1317   }
1318   if (!match(Phi1->getIncomingValueForBlock(ConstBB), m_ImmConstant(C1)))
1319     return nullptr;
1320 
1321   // The block that we are hoisting to must reach here unconditionally.
1322   // Otherwise, we could be speculatively executing an expensive or
1323   // non-speculative op.
1324   auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->getTerminator());
1325   if (!PredBlockBranch || PredBlockBranch->isConditional() ||
1326       !DT.isReachableFromEntry(OtherBB))
1327     return nullptr;
1328 
1329   // TODO: This check could be tightened to only apply to binops (div/rem) that
1330   //       are not safe to speculatively execute. But that could allow hoisting
1331   //       potentially expensive instructions (fdiv for example).
1332   for (auto BBIter = BO.getParent()->begin(); &*BBIter != &BO; ++BBIter)
1333     if (!isGuaranteedToTransferExecutionToSuccessor(&*BBIter))
1334       return nullptr;
1335 
1336   // Make a new binop in the predecessor block with the non-constant incoming
1337   // values.
1338   Builder.SetInsertPoint(PredBlockBranch);
1339   Value *NewBO = Builder.CreateBinOp(BO.getOpcode(),
1340                                      Phi0->getIncomingValueForBlock(OtherBB),
1341                                      Phi1->getIncomingValueForBlock(OtherBB));
1342   if (auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
1343     NotFoldedNewBO->copyIRFlags(&BO);
1344 
1345   // Fold constants for the predecessor block with constant incoming values.
1346   Constant *NewC = ConstantExpr::get(BO.getOpcode(), C0, C1);
1347 
1348   // Replace the binop with a phi of the new values. The old phis are dead.
1349   PHINode *NewPhi = PHINode::Create(BO.getType(), 2);
1350   NewPhi->addIncoming(NewBO, OtherBB);
1351   NewPhi->addIncoming(NewC, ConstBB);
1352   return NewPhi;
1353 }
1354 
1355 Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {
1356   if (!isa<Constant>(I.getOperand(1)))
1357     return nullptr;
1358 
1359   if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
1360     if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
1361       return NewSel;
1362   } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
1363     if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
1364       return NewPhi;
1365   }
1366   return nullptr;
1367 }
1368 
1369 /// Given a pointer type and a constant offset, determine whether or not there
1370 /// is a sequence of GEP indices into the pointed type that will land us at the
1371 /// specified offset. If so, fill them into NewIndices and return the resultant
1372 /// element type, otherwise return null.
1373 static Type *findElementAtOffset(PointerType *PtrTy, int64_t IntOffset,
1374                                  SmallVectorImpl<Value *> &NewIndices,
1375                                  const DataLayout &DL) {
1376   // Only used by visitGEPOfBitcast(), which is skipped for opaque pointers.
1377   Type *Ty = PtrTy->getNonOpaquePointerElementType();
1378   if (!Ty->isSized())
1379     return nullptr;
1380 
1381   APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), IntOffset);
1382   SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(Ty, Offset);
1383   if (!Offset.isZero())
1384     return nullptr;
1385 
1386   for (const APInt &Index : Indices)
1387     NewIndices.push_back(ConstantInt::get(PtrTy->getContext(), Index));
1388   return Ty;
1389 }
1390 
1391 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
1392   // If this GEP has only 0 indices, it is the same pointer as
1393   // Src. If Src is not a trivial GEP too, don't combine
1394   // the indices.
1395   if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
1396       !Src.hasOneUse())
1397     return false;
1398   return true;
1399 }
1400 
1401 /// Return a value X such that Val = X * Scale, or null if none.
1402 /// If the multiplication is known not to overflow, then NoSignedWrap is set.
1403 Value *InstCombinerImpl::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
1404   assert(isa<IntegerType>(Val->getType()) && "Can only descale integers!");
1405   assert(cast<IntegerType>(Val->getType())->getBitWidth() ==
1406          Scale.getBitWidth() && "Scale not compatible with value!");
1407 
1408   // If Val is zero or Scale is one then Val = Val * Scale.
1409   if (match(Val, m_Zero()) || Scale == 1) {
1410     NoSignedWrap = true;
1411     return Val;
1412   }
1413 
1414   // If Scale is zero then it does not divide Val.
1415   if (Scale.isMinValue())
1416     return nullptr;
1417 
1418   // Look through chains of multiplications, searching for a constant that is
1419   // divisible by Scale.  For example, descaling X*(Y*(Z*4)) by a factor of 4
1420   // will find the constant factor 4 and produce X*(Y*Z).  Descaling X*(Y*8) by
1421   // a factor of 4 will produce X*(Y*2).  The principle of operation is to bore
1422   // down from Val:
1423   //
1424   //     Val = M1 * X          ||   Analysis starts here and works down
1425   //      M1 = M2 * Y          ||   Doesn't descend into terms with more
1426   //      M2 =  Z * 4          \/   than one use
1427   //
1428   // Then to modify a term at the bottom:
1429   //
1430   //     Val = M1 * X
1431   //      M1 =  Z * Y          ||   Replaced M2 with Z
1432   //
1433   // Then to work back up correcting nsw flags.
1434 
1435   // Op - the term we are currently analyzing.  Starts at Val then drills down.
1436   // Replaced with its descaled value before exiting from the drill down loop.
1437   Value *Op = Val;
1438 
1439   // Parent - initially null, but after drilling down notes where Op came from.
1440   // In the example above, Parent is (Val, 0) when Op is M1, because M1 is the
1441   // 0'th operand of Val.
1442   std::pair<Instruction *, unsigned> Parent;
1443 
1444   // Set if the transform requires a descaling at deeper levels that doesn't
1445   // overflow.
1446   bool RequireNoSignedWrap = false;
1447 
1448   // Log base 2 of the scale. Negative if not a power of 2.
1449   int32_t logScale = Scale.exactLogBase2();
1450 
1451   for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down
1452     if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
1453       // If Op is a constant divisible by Scale then descale to the quotient.
1454       APInt Quotient(Scale), Remainder(Scale); // Init ensures right bitwidth.
1455       APInt::sdivrem(CI->getValue(), Scale, Quotient, Remainder);
1456       if (!Remainder.isMinValue())
1457         // Not divisible by Scale.
1458         return nullptr;
1459       // Replace with the quotient in the parent.
1460       Op = ConstantInt::get(CI->getType(), Quotient);
1461       NoSignedWrap = true;
1462       break;
1463     }
1464 
1465     if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op)) {
1466       if (BO->getOpcode() == Instruction::Mul) {
1467         // Multiplication.
1468         NoSignedWrap = BO->hasNoSignedWrap();
1469         if (RequireNoSignedWrap && !NoSignedWrap)
1470           return nullptr;
1471 
1472         // There are three cases for multiplication: multiplication by exactly
1473         // the scale, multiplication by a constant different to the scale, and
1474         // multiplication by something else.
1475         Value *LHS = BO->getOperand(0);
1476         Value *RHS = BO->getOperand(1);
1477 
1478         if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
1479           // Multiplication by a constant.
1480           if (CI->getValue() == Scale) {
1481             // Multiplication by exactly the scale, replace the multiplication
1482             // by its left-hand side in the parent.
1483             Op = LHS;
1484             break;
1485           }
1486 
1487           // Otherwise drill down into the constant.
1488           if (!Op->hasOneUse())
1489             return nullptr;
1490 
1491           Parent = std::make_pair(BO, 1);
1492           continue;
1493         }
1494 
1495         // Multiplication by something else. Drill down into the left-hand side
1496         // since that's where the reassociate pass puts the good stuff.
1497         if (!Op->hasOneUse())
1498           return nullptr;
1499 
1500         Parent = std::make_pair(BO, 0);
1501         continue;
1502       }
1503 
1504       if (logScale > 0 && BO->getOpcode() == Instruction::Shl &&
1505           isa<ConstantInt>(BO->getOperand(1))) {
1506         // Multiplication by a power of 2.
1507         NoSignedWrap = BO->hasNoSignedWrap();
1508         if (RequireNoSignedWrap && !NoSignedWrap)
1509           return nullptr;
1510 
1511         Value *LHS = BO->getOperand(0);
1512         int32_t Amt = cast<ConstantInt>(BO->getOperand(1))->
1513           getLimitedValue(Scale.getBitWidth());
1514         // Op = LHS << Amt.
1515 
1516         if (Amt == logScale) {
1517           // Multiplication by exactly the scale, replace the multiplication
1518           // by its left-hand side in the parent.
1519           Op = LHS;
1520           break;
1521         }
1522         if (Amt < logScale || !Op->hasOneUse())
1523           return nullptr;
1524 
1525         // Multiplication by more than the scale.  Reduce the multiplying amount
1526         // by the scale in the parent.
1527         Parent = std::make_pair(BO, 1);
1528         Op = ConstantInt::get(BO->getType(), Amt - logScale);
1529         break;
1530       }
1531     }
1532 
1533     if (!Op->hasOneUse())
1534       return nullptr;
1535 
1536     if (CastInst *Cast = dyn_cast<CastInst>(Op)) {
1537       if (Cast->getOpcode() == Instruction::SExt) {
1538         // Op is sign-extended from a smaller type, descale in the smaller type.
1539         unsigned SmallSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
1540         APInt SmallScale = Scale.trunc(SmallSize);
1541         // Suppose Op = sext X, and we descale X as Y * SmallScale.  We want to
1542         // descale Op as (sext Y) * Scale.  In order to have
1543         //   sext (Y * SmallScale) = (sext Y) * Scale
1544         // some conditions need to hold however: SmallScale must sign-extend to
1545         // Scale and the multiplication Y * SmallScale should not overflow.
1546         if (SmallScale.sext(Scale.getBitWidth()) != Scale)
1547           // SmallScale does not sign-extend to Scale.
1548           return nullptr;
1549         assert(SmallScale.exactLogBase2() == logScale);
1550         // Require that Y * SmallScale must not overflow.
1551         RequireNoSignedWrap = true;
1552 
1553         // Drill down through the cast.
1554         Parent = std::make_pair(Cast, 0);
1555         Scale = SmallScale;
1556         continue;
1557       }
1558 
1559       if (Cast->getOpcode() == Instruction::Trunc) {
1560         // Op is truncated from a larger type, descale in the larger type.
1561         // Suppose Op = trunc X, and we descale X as Y * sext Scale.  Then
1562         //   trunc (Y * sext Scale) = (trunc Y) * Scale
1563         // always holds.  However (trunc Y) * Scale may overflow even if
1564         // trunc (Y * sext Scale) does not, so nsw flags need to be cleared
1565         // from this point up in the expression (see later).
1566         if (RequireNoSignedWrap)
1567           return nullptr;
1568 
1569         // Drill down through the cast.
1570         unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
1571         Parent = std::make_pair(Cast, 0);
1572         Scale = Scale.sext(LargeSize);
1573         if (logScale + 1 == (int32_t)Cast->getType()->getPrimitiveSizeInBits())
1574           logScale = -1;
1575         assert(Scale.exactLogBase2() == logScale);
1576         continue;
1577       }
1578     }
1579 
1580     // Unsupported expression, bail out.
1581     return nullptr;
1582   }
1583 
1584   // If Op is zero then Val = Op * Scale.
1585   if (match(Op, m_Zero())) {
1586     NoSignedWrap = true;
1587     return Op;
1588   }
1589 
1590   // We know that we can successfully descale, so from here on we can safely
1591   // modify the IR.  Op holds the descaled version of the deepest term in the
1592   // expression.  NoSignedWrap is 'true' if multiplying Op by Scale is known
1593   // not to overflow.
1594 
1595   if (!Parent.first)
1596     // The expression only had one term.
1597     return Op;
1598 
1599   // Rewrite the parent using the descaled version of its operand.
1600   assert(Parent.first->hasOneUse() && "Drilled down when more than one use!");
1601   assert(Op != Parent.first->getOperand(Parent.second) &&
1602          "Descaling was a no-op?");
1603   replaceOperand(*Parent.first, Parent.second, Op);
1604   Worklist.push(Parent.first);
1605 
1606   // Now work back up the expression correcting nsw flags.  The logic is based
1607   // on the following observation: if X * Y is known not to overflow as a signed
1608   // multiplication, and Y is replaced by a value Z with smaller absolute value,
1609   // then X * Z will not overflow as a signed multiplication either.  As we work
1610   // our way up, having NoSignedWrap 'true' means that the descaled value at the
1611   // current level has strictly smaller absolute value than the original.
1612   Instruction *Ancestor = Parent.first;
1613   do {
1614     if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Ancestor)) {
1615       // If the multiplication wasn't nsw then we can't say anything about the
1616       // value of the descaled multiplication, and we have to clear nsw flags
1617       // from this point on up.
1618       bool OpNoSignedWrap = BO->hasNoSignedWrap();
1619       NoSignedWrap &= OpNoSignedWrap;
1620       if (NoSignedWrap != OpNoSignedWrap) {
1621         BO->setHasNoSignedWrap(NoSignedWrap);
1622         Worklist.push(Ancestor);
1623       }
1624     } else if (Ancestor->getOpcode() == Instruction::Trunc) {
1625       // The fact that the descaled input to the trunc has smaller absolute
1626       // value than the original input doesn't tell us anything useful about
1627       // the absolute values of the truncations.
1628       NoSignedWrap = false;
1629     }
1630     assert((Ancestor->getOpcode() != Instruction::SExt || NoSignedWrap) &&
1631            "Failed to keep proper track of nsw flags while drilling down?");
1632 
1633     if (Ancestor == Val)
1634       // Got to the top, all done!
1635       return Val;
1636 
1637     // Move up one level in the expression.
1638     assert(Ancestor->hasOneUse() && "Drilled down when more than one use!");
1639     Ancestor = Ancestor->user_back();
1640   } while (true);
1641 }
1642 
1643 Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
1644   if (!isa<VectorType>(Inst.getType()))
1645     return nullptr;
1646 
1647   BinaryOperator::BinaryOps Opcode = Inst.getOpcode();
1648   Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
1649   assert(cast<VectorType>(LHS->getType())->getElementCount() ==
1650          cast<VectorType>(Inst.getType())->getElementCount());
1651   assert(cast<VectorType>(RHS->getType())->getElementCount() ==
1652          cast<VectorType>(Inst.getType())->getElementCount());
1653 
1654   // If both operands of the binop are vector concatenations, then perform the
1655   // narrow binop on each pair of the source operands followed by concatenation
1656   // of the results.
1657   Value *L0, *L1, *R0, *R1;
1658   ArrayRef<int> Mask;
1659   if (match(LHS, m_Shuffle(m_Value(L0), m_Value(L1), m_Mask(Mask))) &&
1660       match(RHS, m_Shuffle(m_Value(R0), m_Value(R1), m_SpecificMask(Mask))) &&
1661       LHS->hasOneUse() && RHS->hasOneUse() &&
1662       cast<ShuffleVectorInst>(LHS)->isConcat() &&
1663       cast<ShuffleVectorInst>(RHS)->isConcat()) {
1664     // This transform does not have the speculative execution constraint as
1665     // below because the shuffle is a concatenation. The new binops are
1666     // operating on exactly the same elements as the existing binop.
1667     // TODO: We could ease the mask requirement to allow different undef lanes,
1668     //       but that requires an analysis of the binop-with-undef output value.
1669     Value *NewBO0 = Builder.CreateBinOp(Opcode, L0, R0);
1670     if (auto *BO = dyn_cast<BinaryOperator>(NewBO0))
1671       BO->copyIRFlags(&Inst);
1672     Value *NewBO1 = Builder.CreateBinOp(Opcode, L1, R1);
1673     if (auto *BO = dyn_cast<BinaryOperator>(NewBO1))
1674       BO->copyIRFlags(&Inst);
1675     return new ShuffleVectorInst(NewBO0, NewBO1, Mask);
1676   }
1677 
1678   // It may not be safe to reorder shuffles and things like div, urem, etc.
1679   // because we may trap when executing those ops on unknown vector elements.
1680   // See PR20059.
1681   if (!isSafeToSpeculativelyExecute(&Inst))
1682     return nullptr;
1683 
1684   auto createBinOpShuffle = [&](Value *X, Value *Y, ArrayRef<int> M) {
1685     Value *XY = Builder.CreateBinOp(Opcode, X, Y);
1686     if (auto *BO = dyn_cast<BinaryOperator>(XY))
1687       BO->copyIRFlags(&Inst);
1688     return new ShuffleVectorInst(XY, M);
1689   };
1690 
1691   // If both arguments of the binary operation are shuffles that use the same
1692   // mask and shuffle within a single vector, move the shuffle after the binop.
1693   Value *V1, *V2;
1694   if (match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(Mask))) &&
1695       match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(Mask))) &&
1696       V1->getType() == V2->getType() &&
1697       (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
1698     // Op(shuffle(V1, Mask), shuffle(V2, Mask)) -> shuffle(Op(V1, V2), Mask)
1699     return createBinOpShuffle(V1, V2, Mask);
1700   }
1701 
1702   // If both arguments of a commutative binop are select-shuffles that use the
1703   // same mask with commuted operands, the shuffles are unnecessary.
1704   if (Inst.isCommutative() &&
1705       match(LHS, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))) &&
1706       match(RHS,
1707             m_Shuffle(m_Specific(V2), m_Specific(V1), m_SpecificMask(Mask)))) {
1708     auto *LShuf = cast<ShuffleVectorInst>(LHS);
1709     auto *RShuf = cast<ShuffleVectorInst>(RHS);
1710     // TODO: Allow shuffles that contain undefs in the mask?
1711     //       That is legal, but it reduces undef knowledge.
1712     // TODO: Allow arbitrary shuffles by shuffling after binop?
1713     //       That might be legal, but we have to deal with poison.
1714     if (LShuf->isSelect() &&
1715         !is_contained(LShuf->getShuffleMask(), UndefMaskElem) &&
1716         RShuf->isSelect() &&
1717         !is_contained(RShuf->getShuffleMask(), UndefMaskElem)) {
1718       // Example:
1719       // LHS = shuffle V1, V2, <0, 5, 6, 3>
1720       // RHS = shuffle V2, V1, <0, 5, 6, 3>
1721       // LHS + RHS --> (V10+V20, V21+V11, V22+V12, V13+V23) --> V1 + V2
1722       Instruction *NewBO = BinaryOperator::Create(Opcode, V1, V2);
1723       NewBO->copyIRFlags(&Inst);
1724       return NewBO;
1725     }
1726   }
1727 
1728   // If one argument is a shuffle within one vector and the other is a constant,
1729   // try moving the shuffle after the binary operation. This canonicalization
1730   // intends to move shuffles closer to other shuffles and binops closer to
1731   // other binops, so they can be folded. It may also enable demanded elements
1732   // transforms.
1733   Constant *C;
1734   auto *InstVTy = dyn_cast<FixedVectorType>(Inst.getType());
1735   if (InstVTy &&
1736       match(&Inst,
1737             m_c_BinOp(m_OneUse(m_Shuffle(m_Value(V1), m_Undef(), m_Mask(Mask))),
1738                       m_ImmConstant(C))) &&
1739       cast<FixedVectorType>(V1->getType())->getNumElements() <=
1740           InstVTy->getNumElements()) {
1741     assert(InstVTy->getScalarType() == V1->getType()->getScalarType() &&
1742            "Shuffle should not change scalar type");
1743 
1744     // Find constant NewC that has property:
1745     //   shuffle(NewC, ShMask) = C
1746     // If such constant does not exist (example: ShMask=<0,0> and C=<1,2>)
1747     // reorder is not possible. A 1-to-1 mapping is not required. Example:
1748     // ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC = <undef,5,6,undef>
1749     bool ConstOp1 = isa<Constant>(RHS);
1750     ArrayRef<int> ShMask = Mask;
1751     unsigned SrcVecNumElts =
1752         cast<FixedVectorType>(V1->getType())->getNumElements();
1753     UndefValue *UndefScalar = UndefValue::get(C->getType()->getScalarType());
1754     SmallVector<Constant *, 16> NewVecC(SrcVecNumElts, UndefScalar);
1755     bool MayChange = true;
1756     unsigned NumElts = InstVTy->getNumElements();
1757     for (unsigned I = 0; I < NumElts; ++I) {
1758       Constant *CElt = C->getAggregateElement(I);
1759       if (ShMask[I] >= 0) {
1760         assert(ShMask[I] < (int)NumElts && "Not expecting narrowing shuffle");
1761         Constant *NewCElt = NewVecC[ShMask[I]];
1762         // Bail out if:
1763         // 1. The constant vector contains a constant expression.
1764         // 2. The shuffle needs an element of the constant vector that can't
1765         //    be mapped to a new constant vector.
1766         // 3. This is a widening shuffle that copies elements of V1 into the
1767         //    extended elements (extending with undef is allowed).
1768         if (!CElt || (!isa<UndefValue>(NewCElt) && NewCElt != CElt) ||
1769             I >= SrcVecNumElts) {
1770           MayChange = false;
1771           break;
1772         }
1773         NewVecC[ShMask[I]] = CElt;
1774       }
1775       // If this is a widening shuffle, we must be able to extend with undef
1776       // elements. If the original binop does not produce an undef in the high
1777       // lanes, then this transform is not safe.
1778       // Similarly for undef lanes due to the shuffle mask, we can only
1779       // transform binops that preserve undef.
1780       // TODO: We could shuffle those non-undef constant values into the
1781       //       result by using a constant vector (rather than an undef vector)
1782       //       as operand 1 of the new binop, but that might be too aggressive
1783       //       for target-independent shuffle creation.
1784       if (I >= SrcVecNumElts || ShMask[I] < 0) {
1785         Constant *MaybeUndef =
1786             ConstOp1 ? ConstantExpr::get(Opcode, UndefScalar, CElt)
1787                      : ConstantExpr::get(Opcode, CElt, UndefScalar);
1788         if (!match(MaybeUndef, m_Undef())) {
1789           MayChange = false;
1790           break;
1791         }
1792       }
1793     }
1794     if (MayChange) {
1795       Constant *NewC = ConstantVector::get(NewVecC);
1796       // It may not be safe to execute a binop on a vector with undef elements
1797       // because the entire instruction can be folded to undef or create poison
1798       // that did not exist in the original code.
1799       if (Inst.isIntDivRem() || (Inst.isShift() && ConstOp1))
1800         NewC = getSafeVectorConstantForBinop(Opcode, NewC, ConstOp1);
1801 
1802       // Op(shuffle(V1, Mask), C) -> shuffle(Op(V1, NewC), Mask)
1803       // Op(C, shuffle(V1, Mask)) -> shuffle(Op(NewC, V1), Mask)
1804       Value *NewLHS = ConstOp1 ? V1 : NewC;
1805       Value *NewRHS = ConstOp1 ? NewC : V1;
1806       return createBinOpShuffle(NewLHS, NewRHS, Mask);
1807     }
1808   }
1809 
1810   // Try to reassociate to sink a splat shuffle after a binary operation.
1811   if (Inst.isAssociative() && Inst.isCommutative()) {
1812     // Canonicalize shuffle operand as LHS.
1813     if (isa<ShuffleVectorInst>(RHS))
1814       std::swap(LHS, RHS);
1815 
1816     Value *X;
1817     ArrayRef<int> MaskC;
1818     int SplatIndex;
1819     Value *Y, *OtherOp;
1820     if (!match(LHS,
1821                m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
1822         !match(MaskC, m_SplatOrUndefMask(SplatIndex)) ||
1823         X->getType() != Inst.getType() ||
1824         !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
1825       return nullptr;
1826 
1827     // FIXME: This may not be safe if the analysis allows undef elements. By
1828     //        moving 'Y' before the splat shuffle, we are implicitly assuming
1829     //        that it is not undef/poison at the splat index.
1830     if (isSplatValue(OtherOp, SplatIndex)) {
1831       std::swap(Y, OtherOp);
1832     } else if (!isSplatValue(Y, SplatIndex)) {
1833       return nullptr;
1834     }
1835 
1836     // X and Y are splatted values, so perform the binary operation on those
1837     // values followed by a splat followed by the 2nd binary operation:
1838     // bo (splat X), (bo Y, OtherOp) --> bo (splat (bo X, Y)), OtherOp
1839     Value *NewBO = Builder.CreateBinOp(Opcode, X, Y);
1840     SmallVector<int, 8> NewMask(MaskC.size(), SplatIndex);
1841     Value *NewSplat = Builder.CreateShuffleVector(NewBO, NewMask);
1842     Instruction *R = BinaryOperator::Create(Opcode, NewSplat, OtherOp);
1843 
1844     // Intersect FMF on both new binops. Other (poison-generating) flags are
1845     // dropped to be safe.
1846     if (isa<FPMathOperator>(R)) {
1847       R->copyFastMathFlags(&Inst);
1848       R->andIRFlags(RHS);
1849     }
1850     if (auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
1851       NewInstBO->copyIRFlags(R);
1852     return R;
1853   }
1854 
1855   return nullptr;
1856 }
1857 
1858 /// Try to narrow the width of a binop if at least 1 operand is an extend of
1859 /// of a value. This requires a potentially expensive known bits check to make
1860 /// sure the narrow op does not overflow.
1861 Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {
1862   // We need at least one extended operand.
1863   Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1);
1864 
1865   // If this is a sub, we swap the operands since we always want an extension
1866   // on the RHS. The LHS can be an extension or a constant.
1867   if (BO.getOpcode() == Instruction::Sub)
1868     std::swap(Op0, Op1);
1869 
1870   Value *X;
1871   bool IsSext = match(Op0, m_SExt(m_Value(X)));
1872   if (!IsSext && !match(Op0, m_ZExt(m_Value(X))))
1873     return nullptr;
1874 
1875   // If both operands are the same extension from the same source type and we
1876   // can eliminate at least one (hasOneUse), this might work.
1877   CastInst::CastOps CastOpc = IsSext ? Instruction::SExt : Instruction::ZExt;
1878   Value *Y;
1879   if (!(match(Op1, m_ZExtOrSExt(m_Value(Y))) && X->getType() == Y->getType() &&
1880         cast<Operator>(Op1)->getOpcode() == CastOpc &&
1881         (Op0->hasOneUse() || Op1->hasOneUse()))) {
1882     // If that did not match, see if we have a suitable constant operand.
1883     // Truncating and extending must produce the same constant.
1884     Constant *WideC;
1885     if (!Op0->hasOneUse() || !match(Op1, m_Constant(WideC)))
1886       return nullptr;
1887     Constant *NarrowC = ConstantExpr::getTrunc(WideC, X->getType());
1888     if (ConstantExpr::getCast(CastOpc, NarrowC, BO.getType()) != WideC)
1889       return nullptr;
1890     Y = NarrowC;
1891   }
1892 
1893   // Swap back now that we found our operands.
1894   if (BO.getOpcode() == Instruction::Sub)
1895     std::swap(X, Y);
1896 
1897   // Both operands have narrow versions. Last step: the math must not overflow
1898   // in the narrow width.
1899   if (!willNotOverflow(BO.getOpcode(), X, Y, BO, IsSext))
1900     return nullptr;
1901 
1902   // bo (ext X), (ext Y) --> ext (bo X, Y)
1903   // bo (ext X), C       --> ext (bo X, C')
1904   Value *NarrowBO = Builder.CreateBinOp(BO.getOpcode(), X, Y, "narrow");
1905   if (auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
1906     if (IsSext)
1907       NewBinOp->setHasNoSignedWrap();
1908     else
1909       NewBinOp->setHasNoUnsignedWrap();
1910   }
1911   return CastInst::Create(CastOpc, NarrowBO, BO.getType());
1912 }
1913 
1914 static bool isMergedGEPInBounds(GEPOperator &GEP1, GEPOperator &GEP2) {
1915   // At least one GEP must be inbounds.
1916   if (!GEP1.isInBounds() && !GEP2.isInBounds())
1917     return false;
1918 
1919   return (GEP1.isInBounds() || GEP1.hasAllZeroIndices()) &&
1920          (GEP2.isInBounds() || GEP2.hasAllZeroIndices());
1921 }
1922 
1923 /// Thread a GEP operation with constant indices through the constant true/false
1924 /// arms of a select.
1925 static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
1926                                   InstCombiner::BuilderTy &Builder) {
1927   if (!GEP.hasAllConstantIndices())
1928     return nullptr;
1929 
1930   Instruction *Sel;
1931   Value *Cond;
1932   Constant *TrueC, *FalseC;
1933   if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
1934       !match(Sel,
1935              m_Select(m_Value(Cond), m_Constant(TrueC), m_Constant(FalseC))))
1936     return nullptr;
1937 
1938   // gep (select Cond, TrueC, FalseC), IndexC --> select Cond, TrueC', FalseC'
1939   // Propagate 'inbounds' and metadata from existing instructions.
1940   // Note: using IRBuilder to create the constants for efficiency.
1941   SmallVector<Value *, 4> IndexC(GEP.indices());
1942   bool IsInBounds = GEP.isInBounds();
1943   Type *Ty = GEP.getSourceElementType();
1944   Value *NewTrueC = IsInBounds ? Builder.CreateInBoundsGEP(Ty, TrueC, IndexC)
1945                                : Builder.CreateGEP(Ty, TrueC, IndexC);
1946   Value *NewFalseC = IsInBounds ? Builder.CreateInBoundsGEP(Ty, FalseC, IndexC)
1947                                 : Builder.CreateGEP(Ty, FalseC, IndexC);
1948   return SelectInst::Create(Cond, NewTrueC, NewFalseC, "", nullptr, Sel);
1949 }
1950 
1951 Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
1952                                              GEPOperator *Src) {
1953   // Combine Indices - If the source pointer to this getelementptr instruction
1954   // is a getelementptr instruction with matching element type, combine the
1955   // indices of the two getelementptr instructions into a single instruction.
1956   if (Src->getResultElementType() != GEP.getSourceElementType())
1957     return nullptr;
1958 
1959   if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
1960     return nullptr;
1961 
1962   if (Src->getNumOperands() == 2 && GEP.getNumOperands() == 2 &&
1963       Src->hasOneUse()) {
1964     Value *GO1 = GEP.getOperand(1);
1965     Value *SO1 = Src->getOperand(1);
1966 
1967     if (LI) {
1968       // Try to reassociate loop invariant GEP chains to enable LICM.
1969       if (Loop *L = LI->getLoopFor(GEP.getParent())) {
1970         // Reassociate the two GEPs if SO1 is variant in the loop and GO1 is
1971         // invariant: this breaks the dependence between GEPs and allows LICM
1972         // to hoist the invariant part out of the loop.
1973         if (L->isLoopInvariant(GO1) && !L->isLoopInvariant(SO1)) {
1974           // We have to be careful here.
1975           // We have something like:
1976           //  %src = getelementptr <ty>, <ty>* %base, <ty> %idx
1977           //  %gep = getelementptr <ty>, <ty>* %src, <ty> %idx2
1978           // If we just swap idx & idx2 then we could inadvertantly
1979           // change %src from a vector to a scalar, or vice versa.
1980           // Cases:
1981           //  1) %base a scalar & idx a scalar & idx2 a vector
1982           //      => Swapping idx & idx2 turns %src into a vector type.
1983           //  2) %base a scalar & idx a vector & idx2 a scalar
1984           //      => Swapping idx & idx2 turns %src in a scalar type
1985           //  3) %base, %idx, and %idx2 are scalars
1986           //      => %src & %gep are scalars
1987           //      => swapping idx & idx2 is safe
1988           //  4) %base a vector
1989           //      => %src is a vector
1990           //      => swapping idx & idx2 is safe.
1991           auto *SO0 = Src->getOperand(0);
1992           auto *SO0Ty = SO0->getType();
1993           if (!isa<VectorType>(GEP.getType()) || // case 3
1994               isa<VectorType>(SO0Ty)) { // case 4
1995             Src->setOperand(1, GO1);
1996             GEP.setOperand(1, SO1);
1997             return &GEP;
1998           } else {
1999             // Case 1 or 2
2000             // -- have to recreate %src & %gep
2001             // put NewSrc at same location as %src
2002             Builder.SetInsertPoint(cast<Instruction>(Src));
2003             Value *NewSrc = Builder.CreateGEP(
2004                 GEP.getSourceElementType(), SO0, GO1, Src->getName());
2005             // Propagate 'inbounds' if the new source was not constant-folded.
2006             if (auto *NewSrcGEPI = dyn_cast<GetElementPtrInst>(NewSrc))
2007               NewSrcGEPI->setIsInBounds(Src->isInBounds());
2008             GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
2009                 GEP.getSourceElementType(), NewSrc, {SO1});
2010             NewGEP->setIsInBounds(GEP.isInBounds());
2011             return NewGEP;
2012           }
2013         }
2014       }
2015     }
2016   }
2017 
2018   // Note that if our source is a gep chain itself then we wait for that
2019   // chain to be resolved before we perform this transformation.  This
2020   // avoids us creating a TON of code in some cases.
2021   if (auto *SrcGEP = dyn_cast<GEPOperator>(Src->getOperand(0)))
2022     if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
2023       return nullptr;   // Wait until our source is folded to completion.
2024 
2025   SmallVector<Value*, 8> Indices;
2026 
2027   // Find out whether the last index in the source GEP is a sequential idx.
2028   bool EndsWithSequential = false;
2029   for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
2030        I != E; ++I)
2031     EndsWithSequential = I.isSequential();
2032 
2033   // Can we combine the two pointer arithmetics offsets?
2034   if (EndsWithSequential) {
2035     // Replace: gep (gep %P, long B), long A, ...
2036     // With:    T = long A+B; gep %P, T, ...
2037     Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
2038     Value *GO1 = GEP.getOperand(1);
2039 
2040     // If they aren't the same type, then the input hasn't been processed
2041     // by the loop above yet (which canonicalizes sequential index types to
2042     // intptr_t).  Just avoid transforming this until the input has been
2043     // normalized.
2044     if (SO1->getType() != GO1->getType())
2045       return nullptr;
2046 
2047     Value *Sum =
2048         SimplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2049     // Only do the combine when we are sure the cost after the
2050     // merge is never more than that before the merge.
2051     if (Sum == nullptr)
2052       return nullptr;
2053 
2054     // Update the GEP in place if possible.
2055     if (Src->getNumOperands() == 2) {
2056       GEP.setIsInBounds(isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP)));
2057       replaceOperand(GEP, 0, Src->getOperand(0));
2058       replaceOperand(GEP, 1, Sum);
2059       return &GEP;
2060     }
2061     Indices.append(Src->op_begin()+1, Src->op_end()-1);
2062     Indices.push_back(Sum);
2063     Indices.append(GEP.op_begin()+2, GEP.op_end());
2064   } else if (isa<Constant>(*GEP.idx_begin()) &&
2065              cast<Constant>(*GEP.idx_begin())->isNullValue() &&
2066              Src->getNumOperands() != 1) {
2067     // Otherwise we can do the fold if the first index of the GEP is a zero
2068     Indices.append(Src->op_begin()+1, Src->op_end());
2069     Indices.append(GEP.idx_begin()+1, GEP.idx_end());
2070   }
2071 
2072   if (!Indices.empty())
2073     return isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP))
2074                ? GetElementPtrInst::CreateInBounds(
2075                      Src->getSourceElementType(), Src->getOperand(0), Indices,
2076                      GEP.getName())
2077                : GetElementPtrInst::Create(Src->getSourceElementType(),
2078                                            Src->getOperand(0), Indices,
2079                                            GEP.getName());
2080 
2081   return nullptr;
2082 }
2083 
2084 // Note that we may have also stripped an address space cast in between.
2085 Instruction *InstCombinerImpl::visitGEPOfBitcast(BitCastInst *BCI,
2086                                                  GetElementPtrInst &GEP) {
2087   // With opaque pointers, there is no pointer element type we can use to
2088   // adjust the GEP type.
2089   PointerType *SrcType = cast<PointerType>(BCI->getSrcTy());
2090   if (SrcType->isOpaque())
2091     return nullptr;
2092 
2093   Type *GEPEltType = GEP.getSourceElementType();
2094   Type *SrcEltType = SrcType->getNonOpaquePointerElementType();
2095   Value *SrcOp = BCI->getOperand(0);
2096 
2097   // GEP directly using the source operand if this GEP is accessing an element
2098   // of a bitcasted pointer to vector or array of the same dimensions:
2099   // gep (bitcast <c x ty>* X to [c x ty]*), Y, Z --> gep X, Y, Z
2100   // gep (bitcast [c x ty]* X to <c x ty>*), Y, Z --> gep X, Y, Z
2101   auto areMatchingArrayAndVecTypes = [](Type *ArrTy, Type *VecTy,
2102                                         const DataLayout &DL) {
2103     auto *VecVTy = cast<FixedVectorType>(VecTy);
2104     return ArrTy->getArrayElementType() == VecVTy->getElementType() &&
2105            ArrTy->getArrayNumElements() == VecVTy->getNumElements() &&
2106            DL.getTypeAllocSize(ArrTy) == DL.getTypeAllocSize(VecTy);
2107   };
2108   if (GEP.getNumOperands() == 3 &&
2109       ((GEPEltType->isArrayTy() && isa<FixedVectorType>(SrcEltType) &&
2110         areMatchingArrayAndVecTypes(GEPEltType, SrcEltType, DL)) ||
2111        (isa<FixedVectorType>(GEPEltType) && SrcEltType->isArrayTy() &&
2112         areMatchingArrayAndVecTypes(SrcEltType, GEPEltType, DL)))) {
2113 
2114     // Create a new GEP here, as using `setOperand()` followed by
2115     // `setSourceElementType()` won't actually update the type of the
2116     // existing GEP Value. Causing issues if this Value is accessed when
2117     // constructing an AddrSpaceCastInst
2118     SmallVector<Value *, 8> Indices(GEP.indices());
2119     Value *NGEP = GEP.isInBounds()
2120                       ? Builder.CreateInBoundsGEP(SrcEltType, SrcOp, Indices)
2121                       : Builder.CreateGEP(SrcEltType, SrcOp, Indices);
2122     NGEP->takeName(&GEP);
2123 
2124     // Preserve GEP address space to satisfy users
2125     if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
2126       return new AddrSpaceCastInst(NGEP, GEP.getType());
2127 
2128     return replaceInstUsesWith(GEP, NGEP);
2129   }
2130 
2131   // See if we can simplify:
2132   //   X = bitcast A* to B*
2133   //   Y = gep X, <...constant indices...>
2134   // into a gep of the original struct. This is important for SROA and alias
2135   // analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
2136   unsigned OffsetBits = DL.getIndexTypeSizeInBits(GEP.getType());
2137   APInt Offset(OffsetBits, 0);
2138 
2139   // If the bitcast argument is an allocation, The bitcast is for convertion
2140   // to actual type of allocation. Removing such bitcasts, results in having
2141   // GEPs with i8* base and pure byte offsets. That means GEP is not aware of
2142   // struct or array hierarchy.
2143   // By avoiding such GEPs, phi translation and MemoryDependencyAnalysis have
2144   // a better chance to succeed.
2145   if (!isa<BitCastInst>(SrcOp) && GEP.accumulateConstantOffset(DL, Offset) &&
2146       !isAllocationFn(SrcOp, &TLI)) {
2147     // If this GEP instruction doesn't move the pointer, just replace the GEP
2148     // with a bitcast of the real input to the dest type.
2149     if (!Offset) {
2150       // If the bitcast is of an allocation, and the allocation will be
2151       // converted to match the type of the cast, don't touch this.
2152       if (isa<AllocaInst>(SrcOp)) {
2153         // See if the bitcast simplifies, if so, don't nuke this GEP yet.
2154         if (Instruction *I = visitBitCast(*BCI)) {
2155           if (I != BCI) {
2156             I->takeName(BCI);
2157             BCI->getParent()->getInstList().insert(BCI->getIterator(), I);
2158             replaceInstUsesWith(*BCI, I);
2159           }
2160           return &GEP;
2161         }
2162       }
2163 
2164       if (SrcType->getPointerAddressSpace() != GEP.getAddressSpace())
2165         return new AddrSpaceCastInst(SrcOp, GEP.getType());
2166       return new BitCastInst(SrcOp, GEP.getType());
2167     }
2168 
2169     // Otherwise, if the offset is non-zero, we need to find out if there is a
2170     // field at Offset in 'A's type.  If so, we can pull the cast through the
2171     // GEP.
2172     SmallVector<Value*, 8> NewIndices;
2173     if (findElementAtOffset(SrcType, Offset.getSExtValue(), NewIndices, DL)) {
2174       Value *NGEP =
2175           GEP.isInBounds()
2176               ? Builder.CreateInBoundsGEP(SrcEltType, SrcOp, NewIndices)
2177               : Builder.CreateGEP(SrcEltType, SrcOp, NewIndices);
2178 
2179       if (NGEP->getType() == GEP.getType())
2180         return replaceInstUsesWith(GEP, NGEP);
2181       NGEP->takeName(&GEP);
2182 
2183       if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
2184         return new AddrSpaceCastInst(NGEP, GEP.getType());
2185       return new BitCastInst(NGEP, GEP.getType());
2186     }
2187   }
2188 
2189   return nullptr;
2190 }
2191 
2192 Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
2193   Value *PtrOp = GEP.getOperand(0);
2194   SmallVector<Value *, 8> Indices(GEP.indices());
2195   Type *GEPType = GEP.getType();
2196   Type *GEPEltType = GEP.getSourceElementType();
2197   bool IsGEPSrcEleScalable = isa<ScalableVectorType>(GEPEltType);
2198   if (Value *V = SimplifyGEPInst(GEPEltType, PtrOp, Indices, GEP.isInBounds(),
2199                                  SQ.getWithInstruction(&GEP)))
2200     return replaceInstUsesWith(GEP, V);
2201 
2202   // For vector geps, use the generic demanded vector support.
2203   // Skip if GEP return type is scalable. The number of elements is unknown at
2204   // compile-time.
2205   if (auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
2206     auto VWidth = GEPFVTy->getNumElements();
2207     APInt UndefElts(VWidth, 0);
2208     APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
2209     if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
2210                                               UndefElts)) {
2211       if (V != &GEP)
2212         return replaceInstUsesWith(GEP, V);
2213       return &GEP;
2214     }
2215 
2216     // TODO: 1) Scalarize splat operands, 2) scalarize entire instruction if
2217     // possible (decide on canonical form for pointer broadcast), 3) exploit
2218     // undef elements to decrease demanded bits
2219   }
2220 
2221   // Eliminate unneeded casts for indices, and replace indices which displace
2222   // by multiples of a zero size type with zero.
2223   bool MadeChange = false;
2224 
2225   // Index width may not be the same width as pointer width.
2226   // Data layout chooses the right type based on supported integer types.
2227   Type *NewScalarIndexTy =
2228       DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
2229 
2230   gep_type_iterator GTI = gep_type_begin(GEP);
2231   for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
2232        ++I, ++GTI) {
2233     // Skip indices into struct types.
2234     if (GTI.isStruct())
2235       continue;
2236 
2237     Type *IndexTy = (*I)->getType();
2238     Type *NewIndexType =
2239         IndexTy->isVectorTy()
2240             ? VectorType::get(NewScalarIndexTy,
2241                               cast<VectorType>(IndexTy)->getElementCount())
2242             : NewScalarIndexTy;
2243 
2244     // If the element type has zero size then any index over it is equivalent
2245     // to an index of zero, so replace it with zero if it is not zero already.
2246     Type *EltTy = GTI.getIndexedType();
2247     if (EltTy->isSized() && DL.getTypeAllocSize(EltTy).isZero())
2248       if (!isa<Constant>(*I) || !match(I->get(), m_Zero())) {
2249         *I = Constant::getNullValue(NewIndexType);
2250         MadeChange = true;
2251       }
2252 
2253     if (IndexTy != NewIndexType) {
2254       // If we are using a wider index than needed for this platform, shrink
2255       // it to what we need.  If narrower, sign-extend it to what we need.
2256       // This explicit cast can make subsequent optimizations more obvious.
2257       *I = Builder.CreateIntCast(*I, NewIndexType, true);
2258       MadeChange = true;
2259     }
2260   }
2261   if (MadeChange)
2262     return &GEP;
2263 
2264   // Check to see if the inputs to the PHI node are getelementptr instructions.
2265   if (auto *PN = dyn_cast<PHINode>(PtrOp)) {
2266     auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
2267     if (!Op1)
2268       return nullptr;
2269 
2270     // Don't fold a GEP into itself through a PHI node. This can only happen
2271     // through the back-edge of a loop. Folding a GEP into itself means that
2272     // the value of the previous iteration needs to be stored in the meantime,
2273     // thus requiring an additional register variable to be live, but not
2274     // actually achieving anything (the GEP still needs to be executed once per
2275     // loop iteration).
2276     if (Op1 == &GEP)
2277       return nullptr;
2278 
2279     int DI = -1;
2280 
2281     for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
2282       auto *Op2 = dyn_cast<GetElementPtrInst>(*I);
2283       if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
2284           Op1->getSourceElementType() != Op2->getSourceElementType())
2285         return nullptr;
2286 
2287       // As for Op1 above, don't try to fold a GEP into itself.
2288       if (Op2 == &GEP)
2289         return nullptr;
2290 
2291       // Keep track of the type as we walk the GEP.
2292       Type *CurTy = nullptr;
2293 
2294       for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
2295         if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
2296           return nullptr;
2297 
2298         if (Op1->getOperand(J) != Op2->getOperand(J)) {
2299           if (DI == -1) {
2300             // We have not seen any differences yet in the GEPs feeding the
2301             // PHI yet, so we record this one if it is allowed to be a
2302             // variable.
2303 
2304             // The first two arguments can vary for any GEP, the rest have to be
2305             // static for struct slots
2306             if (J > 1) {
2307               assert(CurTy && "No current type?");
2308               if (CurTy->isStructTy())
2309                 return nullptr;
2310             }
2311 
2312             DI = J;
2313           } else {
2314             // The GEP is different by more than one input. While this could be
2315             // extended to support GEPs that vary by more than one variable it
2316             // doesn't make sense since it greatly increases the complexity and
2317             // would result in an R+R+R addressing mode which no backend
2318             // directly supports and would need to be broken into several
2319             // simpler instructions anyway.
2320             return nullptr;
2321           }
2322         }
2323 
2324         // Sink down a layer of the type for the next iteration.
2325         if (J > 0) {
2326           if (J == 1) {
2327             CurTy = Op1->getSourceElementType();
2328           } else {
2329             CurTy =
2330                 GetElementPtrInst::getTypeAtIndex(CurTy, Op1->getOperand(J));
2331           }
2332         }
2333       }
2334     }
2335 
2336     // If not all GEPs are identical we'll have to create a new PHI node.
2337     // Check that the old PHI node has only one use so that it will get
2338     // removed.
2339     if (DI != -1 && !PN->hasOneUse())
2340       return nullptr;
2341 
2342     auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
2343     if (DI == -1) {
2344       // All the GEPs feeding the PHI are identical. Clone one down into our
2345       // BB so that it can be merged with the current GEP.
2346     } else {
2347       // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
2348       // into the current block so it can be merged, and create a new PHI to
2349       // set that index.
2350       PHINode *NewPN;
2351       {
2352         IRBuilderBase::InsertPointGuard Guard(Builder);
2353         Builder.SetInsertPoint(PN);
2354         NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
2355                                   PN->getNumOperands());
2356       }
2357 
2358       for (auto &I : PN->operands())
2359         NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
2360                            PN->getIncomingBlock(I));
2361 
2362       NewGEP->setOperand(DI, NewPN);
2363     }
2364 
2365     GEP.getParent()->getInstList().insert(
2366         GEP.getParent()->getFirstInsertionPt(), NewGEP);
2367     replaceOperand(GEP, 0, NewGEP);
2368     PtrOp = NewGEP;
2369   }
2370 
2371   if (auto *Src = dyn_cast<GEPOperator>(PtrOp))
2372     if (Instruction *I = visitGEPOfGEP(GEP, Src))
2373       return I;
2374 
2375   // Skip if GEP source element type is scalable. The type alloc size is unknown
2376   // at compile-time.
2377   if (GEP.getNumIndices() == 1 && !IsGEPSrcEleScalable) {
2378     unsigned AS = GEP.getPointerAddressSpace();
2379     if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
2380         DL.getIndexSizeInBits(AS)) {
2381       uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedSize();
2382 
2383       bool Matched = false;
2384       uint64_t C;
2385       Value *V = nullptr;
2386       if (TyAllocSize == 1) {
2387         V = GEP.getOperand(1);
2388         Matched = true;
2389       } else if (match(GEP.getOperand(1),
2390                        m_AShr(m_Value(V), m_ConstantInt(C)))) {
2391         if (TyAllocSize == 1ULL << C)
2392           Matched = true;
2393       } else if (match(GEP.getOperand(1),
2394                        m_SDiv(m_Value(V), m_ConstantInt(C)))) {
2395         if (TyAllocSize == C)
2396           Matched = true;
2397       }
2398 
2399       // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) to (bitcast Y), but
2400       // only if both point to the same underlying object (otherwise provenance
2401       // is not necessarily retained).
2402       Value *Y;
2403       Value *X = GEP.getOperand(0);
2404       if (Matched &&
2405           match(V, m_Sub(m_PtrToInt(m_Value(Y)), m_PtrToInt(m_Specific(X)))) &&
2406           getUnderlyingObject(X) == getUnderlyingObject(Y))
2407         return CastInst::CreatePointerBitCastOrAddrSpaceCast(Y, GEPType);
2408     }
2409   }
2410 
2411   // We do not handle pointer-vector geps here.
2412   if (GEPType->isVectorTy())
2413     return nullptr;
2414 
2415   // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
2416   Value *StrippedPtr = PtrOp->stripPointerCasts();
2417   PointerType *StrippedPtrTy = cast<PointerType>(StrippedPtr->getType());
2418 
2419   // TODO: The basic approach of these folds is not compatible with opaque
2420   // pointers, because we can't use bitcasts as a hint for a desirable GEP
2421   // type. Instead, we should perform canonicalization directly on the GEP
2422   // type. For now, skip these.
2423   if (StrippedPtr != PtrOp && !StrippedPtrTy->isOpaque()) {
2424     bool HasZeroPointerIndex = false;
2425     Type *StrippedPtrEltTy = StrippedPtrTy->getNonOpaquePointerElementType();
2426 
2427     if (auto *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
2428       HasZeroPointerIndex = C->isZero();
2429 
2430     // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
2431     // into     : GEP [10 x i8]* X, i32 0, ...
2432     //
2433     // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
2434     //           into     : GEP i8* X, ...
2435     //
2436     // This occurs when the program declares an array extern like "int X[];"
2437     if (HasZeroPointerIndex) {
2438       if (auto *CATy = dyn_cast<ArrayType>(GEPEltType)) {
2439         // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
2440         if (CATy->getElementType() == StrippedPtrEltTy) {
2441           // -> GEP i8* X, ...
2442           SmallVector<Value *, 8> Idx(drop_begin(GEP.indices()));
2443           GetElementPtrInst *Res = GetElementPtrInst::Create(
2444               StrippedPtrEltTy, StrippedPtr, Idx, GEP.getName());
2445           Res->setIsInBounds(GEP.isInBounds());
2446           if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace())
2447             return Res;
2448           // Insert Res, and create an addrspacecast.
2449           // e.g.,
2450           // GEP (addrspacecast i8 addrspace(1)* X to [0 x i8]*), i32 0, ...
2451           // ->
2452           // %0 = GEP i8 addrspace(1)* X, ...
2453           // addrspacecast i8 addrspace(1)* %0 to i8*
2454           return new AddrSpaceCastInst(Builder.Insert(Res), GEPType);
2455         }
2456 
2457         if (auto *XATy = dyn_cast<ArrayType>(StrippedPtrEltTy)) {
2458           // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
2459           if (CATy->getElementType() == XATy->getElementType()) {
2460             // -> GEP [10 x i8]* X, i32 0, ...
2461             // At this point, we know that the cast source type is a pointer
2462             // to an array of the same type as the destination pointer
2463             // array.  Because the array type is never stepped over (there
2464             // is a leading zero) we can fold the cast into this GEP.
2465             if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) {
2466               GEP.setSourceElementType(XATy);
2467               return replaceOperand(GEP, 0, StrippedPtr);
2468             }
2469             // Cannot replace the base pointer directly because StrippedPtr's
2470             // address space is different. Instead, create a new GEP followed by
2471             // an addrspacecast.
2472             // e.g.,
2473             // GEP (addrspacecast [10 x i8] addrspace(1)* X to [0 x i8]*),
2474             //   i32 0, ...
2475             // ->
2476             // %0 = GEP [10 x i8] addrspace(1)* X, ...
2477             // addrspacecast i8 addrspace(1)* %0 to i8*
2478             SmallVector<Value *, 8> Idx(GEP.indices());
2479             Value *NewGEP =
2480                 GEP.isInBounds()
2481                     ? Builder.CreateInBoundsGEP(StrippedPtrEltTy, StrippedPtr,
2482                                                 Idx, GEP.getName())
2483                     : Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, Idx,
2484                                         GEP.getName());
2485             return new AddrSpaceCastInst(NewGEP, GEPType);
2486           }
2487         }
2488       }
2489     } else if (GEP.getNumOperands() == 2 && !IsGEPSrcEleScalable) {
2490       // Skip if GEP source element type is scalable. The type alloc size is
2491       // unknown at compile-time.
2492       // Transform things like: %t = getelementptr i32*
2493       // bitcast ([2 x i32]* %str to i32*), i32 %V into:  %t1 = getelementptr [2
2494       // x i32]* %str, i32 0, i32 %V; bitcast
2495       if (StrippedPtrEltTy->isArrayTy() &&
2496           DL.getTypeAllocSize(StrippedPtrEltTy->getArrayElementType()) ==
2497               DL.getTypeAllocSize(GEPEltType)) {
2498         Type *IdxType = DL.getIndexType(GEPType);
2499         Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
2500         Value *NewGEP =
2501             GEP.isInBounds()
2502                 ? Builder.CreateInBoundsGEP(StrippedPtrEltTy, StrippedPtr, Idx,
2503                                             GEP.getName())
2504                 : Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, Idx,
2505                                     GEP.getName());
2506 
2507         // V and GEP are both pointer types --> BitCast
2508         return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, GEPType);
2509       }
2510 
2511       // Transform things like:
2512       // %V = mul i64 %N, 4
2513       // %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V
2514       // into:  %t1 = getelementptr i32* %arr, i32 %N; bitcast
2515       if (GEPEltType->isSized() && StrippedPtrEltTy->isSized()) {
2516         // Check that changing the type amounts to dividing the index by a scale
2517         // factor.
2518         uint64_t ResSize = DL.getTypeAllocSize(GEPEltType).getFixedSize();
2519         uint64_t SrcSize = DL.getTypeAllocSize(StrippedPtrEltTy).getFixedSize();
2520         if (ResSize && SrcSize % ResSize == 0) {
2521           Value *Idx = GEP.getOperand(1);
2522           unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
2523           uint64_t Scale = SrcSize / ResSize;
2524 
2525           // Earlier transforms ensure that the index has the right type
2526           // according to Data Layout, which considerably simplifies the
2527           // logic by eliminating implicit casts.
2528           assert(Idx->getType() == DL.getIndexType(GEPType) &&
2529                  "Index type does not match the Data Layout preferences");
2530 
2531           bool NSW;
2532           if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
2533             // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
2534             // If the multiplication NewIdx * Scale may overflow then the new
2535             // GEP may not be "inbounds".
2536             Value *NewGEP =
2537                 GEP.isInBounds() && NSW
2538                     ? Builder.CreateInBoundsGEP(StrippedPtrEltTy, StrippedPtr,
2539                                                 NewIdx, GEP.getName())
2540                     : Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, NewIdx,
2541                                         GEP.getName());
2542 
2543             // The NewGEP must be pointer typed, so must the old one -> BitCast
2544             return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
2545                                                                  GEPType);
2546           }
2547         }
2548       }
2549 
2550       // Similarly, transform things like:
2551       // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
2552       //   (where tmp = 8*tmp2) into:
2553       // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
2554       if (GEPEltType->isSized() && StrippedPtrEltTy->isSized() &&
2555           StrippedPtrEltTy->isArrayTy()) {
2556         // Check that changing to the array element type amounts to dividing the
2557         // index by a scale factor.
2558         uint64_t ResSize = DL.getTypeAllocSize(GEPEltType).getFixedSize();
2559         uint64_t ArrayEltSize =
2560             DL.getTypeAllocSize(StrippedPtrEltTy->getArrayElementType())
2561                 .getFixedSize();
2562         if (ResSize && ArrayEltSize % ResSize == 0) {
2563           Value *Idx = GEP.getOperand(1);
2564           unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
2565           uint64_t Scale = ArrayEltSize / ResSize;
2566 
2567           // Earlier transforms ensure that the index has the right type
2568           // according to the Data Layout, which considerably simplifies
2569           // the logic by eliminating implicit casts.
2570           assert(Idx->getType() == DL.getIndexType(GEPType) &&
2571                  "Index type does not match the Data Layout preferences");
2572 
2573           bool NSW;
2574           if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
2575             // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
2576             // If the multiplication NewIdx * Scale may overflow then the new
2577             // GEP may not be "inbounds".
2578             Type *IndTy = DL.getIndexType(GEPType);
2579             Value *Off[2] = {Constant::getNullValue(IndTy), NewIdx};
2580 
2581             Value *NewGEP =
2582                 GEP.isInBounds() && NSW
2583                     ? Builder.CreateInBoundsGEP(StrippedPtrEltTy, StrippedPtr,
2584                                                 Off, GEP.getName())
2585                     : Builder.CreateGEP(StrippedPtrEltTy, StrippedPtr, Off,
2586                                         GEP.getName());
2587             // The NewGEP must be pointer typed, so must the old one -> BitCast
2588             return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
2589                                                                  GEPType);
2590           }
2591         }
2592       }
2593     }
2594   }
2595 
2596   // addrspacecast between types is canonicalized as a bitcast, then an
2597   // addrspacecast. To take advantage of the below bitcast + struct GEP, look
2598   // through the addrspacecast.
2599   Value *ASCStrippedPtrOp = PtrOp;
2600   if (auto *ASC = dyn_cast<AddrSpaceCastInst>(PtrOp)) {
2601     //   X = bitcast A addrspace(1)* to B addrspace(1)*
2602     //   Y = addrspacecast A addrspace(1)* to B addrspace(2)*
2603     //   Z = gep Y, <...constant indices...>
2604     // Into an addrspacecasted GEP of the struct.
2605     if (auto *BC = dyn_cast<BitCastInst>(ASC->getOperand(0)))
2606       ASCStrippedPtrOp = BC;
2607   }
2608 
2609   if (auto *BCI = dyn_cast<BitCastInst>(ASCStrippedPtrOp))
2610     if (Instruction *I = visitGEPOfBitcast(BCI, GEP))
2611       return I;
2612 
2613   if (!GEP.isInBounds()) {
2614     unsigned IdxWidth =
2615         DL.getIndexSizeInBits(PtrOp->getType()->getPointerAddressSpace());
2616     APInt BasePtrOffset(IdxWidth, 0);
2617     Value *UnderlyingPtrOp =
2618             PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL,
2619                                                              BasePtrOffset);
2620     if (auto *AI = dyn_cast<AllocaInst>(UnderlyingPtrOp)) {
2621       if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
2622           BasePtrOffset.isNonNegative()) {
2623         APInt AllocSize(
2624             IdxWidth,
2625             DL.getTypeAllocSize(AI->getAllocatedType()).getKnownMinSize());
2626         if (BasePtrOffset.ule(AllocSize)) {
2627           return GetElementPtrInst::CreateInBounds(
2628               GEP.getSourceElementType(), PtrOp, Indices, GEP.getName());
2629         }
2630       }
2631     }
2632   }
2633 
2634   if (Instruction *R = foldSelectGEP(GEP, Builder))
2635     return R;
2636 
2637   return nullptr;
2638 }
2639 
2640 static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
2641                                          Instruction *AI) {
2642   if (isa<ConstantPointerNull>(V))
2643     return true;
2644   if (auto *LI = dyn_cast<LoadInst>(V))
2645     return isa<GlobalVariable>(LI->getPointerOperand());
2646   // Two distinct allocations will never be equal.
2647   return isAllocLikeFn(V, &TLI) && V != AI;
2648 }
2649 
2650 /// Given a call CB which uses an address UsedV, return true if we can prove the
2651 /// call's only possible effect is storing to V.
2652 static bool isRemovableWrite(CallBase &CB, Value *UsedV,
2653                              const TargetLibraryInfo &TLI) {
2654   if (!CB.use_empty())
2655     // TODO: add recursion if returned attribute is present
2656     return false;
2657 
2658   if (CB.isTerminator())
2659     // TODO: remove implementation restriction
2660     return false;
2661 
2662   if (!CB.willReturn() || !CB.doesNotThrow())
2663     return false;
2664 
2665   // If the only possible side effect of the call is writing to the alloca,
2666   // and the result isn't used, we can safely remove any reads implied by the
2667   // call including those which might read the alloca itself.
2668   Optional<MemoryLocation> Dest = MemoryLocation::getForDest(&CB, TLI);
2669   return Dest && Dest->Ptr == UsedV;
2670 }
2671 
2672 static bool isAllocSiteRemovable(Instruction *AI,
2673                                  SmallVectorImpl<WeakTrackingVH> &Users,
2674                                  const TargetLibraryInfo &TLI) {
2675   SmallVector<Instruction*, 4> Worklist;
2676   Worklist.push_back(AI);
2677 
2678   do {
2679     Instruction *PI = Worklist.pop_back_val();
2680     for (User *U : PI->users()) {
2681       Instruction *I = cast<Instruction>(U);
2682       switch (I->getOpcode()) {
2683       default:
2684         // Give up the moment we see something we can't handle.
2685         return false;
2686 
2687       case Instruction::AddrSpaceCast:
2688       case Instruction::BitCast:
2689       case Instruction::GetElementPtr:
2690         Users.emplace_back(I);
2691         Worklist.push_back(I);
2692         continue;
2693 
2694       case Instruction::ICmp: {
2695         ICmpInst *ICI = cast<ICmpInst>(I);
2696         // We can fold eq/ne comparisons with null to false/true, respectively.
2697         // We also fold comparisons in some conditions provided the alloc has
2698         // not escaped (see isNeverEqualToUnescapedAlloc).
2699         if (!ICI->isEquality())
2700           return false;
2701         unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
2702         if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
2703           return false;
2704         Users.emplace_back(I);
2705         continue;
2706       }
2707 
2708       case Instruction::Call:
2709         // Ignore no-op and store intrinsics.
2710         if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
2711           switch (II->getIntrinsicID()) {
2712           default:
2713             return false;
2714 
2715           case Intrinsic::memmove:
2716           case Intrinsic::memcpy:
2717           case Intrinsic::memset: {
2718             MemIntrinsic *MI = cast<MemIntrinsic>(II);
2719             if (MI->isVolatile() || MI->getRawDest() != PI)
2720               return false;
2721             LLVM_FALLTHROUGH;
2722           }
2723           case Intrinsic::assume:
2724           case Intrinsic::invariant_start:
2725           case Intrinsic::invariant_end:
2726           case Intrinsic::lifetime_start:
2727           case Intrinsic::lifetime_end:
2728           case Intrinsic::objectsize:
2729             Users.emplace_back(I);
2730             continue;
2731           case Intrinsic::launder_invariant_group:
2732           case Intrinsic::strip_invariant_group:
2733             Users.emplace_back(I);
2734             Worklist.push_back(I);
2735             continue;
2736           }
2737         }
2738 
2739         if (isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
2740           Users.emplace_back(I);
2741           continue;
2742         }
2743 
2744         if (isFreeCall(I, &TLI)) {
2745           Users.emplace_back(I);
2746           continue;
2747         }
2748 
2749         if (isReallocLikeFn(I, &TLI)) {
2750           Users.emplace_back(I);
2751           Worklist.push_back(I);
2752           continue;
2753         }
2754 
2755         return false;
2756 
2757       case Instruction::Store: {
2758         StoreInst *SI = cast<StoreInst>(I);
2759         if (SI->isVolatile() || SI->getPointerOperand() != PI)
2760           return false;
2761         Users.emplace_back(I);
2762         continue;
2763       }
2764       }
2765       llvm_unreachable("missing a return?");
2766     }
2767   } while (!Worklist.empty());
2768   return true;
2769 }
2770 
2771 Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
2772   assert(isa<AllocaInst>(MI) || isAllocRemovable(&cast<CallBase>(MI), &TLI));
2773 
2774   // If we have a malloc call which is only used in any amount of comparisons to
2775   // null and free calls, delete the calls and replace the comparisons with true
2776   // or false as appropriate.
2777 
2778   // This is based on the principle that we can substitute our own allocation
2779   // function (which will never return null) rather than knowledge of the
2780   // specific function being called. In some sense this can change the permitted
2781   // outputs of a program (when we convert a malloc to an alloca, the fact that
2782   // the allocation is now on the stack is potentially visible, for example),
2783   // but we believe in a permissible manner.
2784   SmallVector<WeakTrackingVH, 64> Users;
2785 
2786   // If we are removing an alloca with a dbg.declare, insert dbg.value calls
2787   // before each store.
2788   SmallVector<DbgVariableIntrinsic *, 8> DVIs;
2789   std::unique_ptr<DIBuilder> DIB;
2790   if (isa<AllocaInst>(MI)) {
2791     findDbgUsers(DVIs, &MI);
2792     DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
2793   }
2794 
2795   if (isAllocSiteRemovable(&MI, Users, TLI)) {
2796     for (unsigned i = 0, e = Users.size(); i != e; ++i) {
2797       // Lowering all @llvm.objectsize calls first because they may
2798       // use a bitcast/GEP of the alloca we are removing.
2799       if (!Users[i])
2800        continue;
2801 
2802       Instruction *I = cast<Instruction>(&*Users[i]);
2803 
2804       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
2805         if (II->getIntrinsicID() == Intrinsic::objectsize) {
2806           Value *Result =
2807               lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/true);
2808           replaceInstUsesWith(*I, Result);
2809           eraseInstFromFunction(*I);
2810           Users[i] = nullptr; // Skip examining in the next loop.
2811         }
2812       }
2813     }
2814     for (unsigned i = 0, e = Users.size(); i != e; ++i) {
2815       if (!Users[i])
2816         continue;
2817 
2818       Instruction *I = cast<Instruction>(&*Users[i]);
2819 
2820       if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
2821         replaceInstUsesWith(*C,
2822                             ConstantInt::get(Type::getInt1Ty(C->getContext()),
2823                                              C->isFalseWhenEqual()));
2824       } else if (auto *SI = dyn_cast<StoreInst>(I)) {
2825         for (auto *DVI : DVIs)
2826           if (DVI->isAddressOfVariable())
2827             ConvertDebugDeclareToDebugValue(DVI, SI, *DIB);
2828       } else {
2829         // Casts, GEP, or anything else: we're about to delete this instruction,
2830         // so it can not have any valid uses.
2831         replaceInstUsesWith(*I, PoisonValue::get(I->getType()));
2832       }
2833       eraseInstFromFunction(*I);
2834     }
2835 
2836     if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
2837       // Replace invoke with a NOP intrinsic to maintain the original CFG
2838       Module *M = II->getModule();
2839       Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing);
2840       InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(),
2841                          None, "", II->getParent());
2842     }
2843 
2844     // Remove debug intrinsics which describe the value contained within the
2845     // alloca. In addition to removing dbg.{declare,addr} which simply point to
2846     // the alloca, remove dbg.value(<alloca>, ..., DW_OP_deref)'s as well, e.g.:
2847     //
2848     // ```
2849     //   define void @foo(i32 %0) {
2850     //     %a = alloca i32                              ; Deleted.
2851     //     store i32 %0, i32* %a
2852     //     dbg.value(i32 %0, "arg0")                    ; Not deleted.
2853     //     dbg.value(i32* %a, "arg0", DW_OP_deref)      ; Deleted.
2854     //     call void @trivially_inlinable_no_op(i32* %a)
2855     //     ret void
2856     //  }
2857     // ```
2858     //
2859     // This may not be required if we stop describing the contents of allocas
2860     // using dbg.value(<alloca>, ..., DW_OP_deref), but we currently do this in
2861     // the LowerDbgDeclare utility.
2862     //
2863     // If there is a dead store to `%a` in @trivially_inlinable_no_op, the
2864     // "arg0" dbg.value may be stale after the call. However, failing to remove
2865     // the DW_OP_deref dbg.value causes large gaps in location coverage.
2866     for (auto *DVI : DVIs)
2867       if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
2868         DVI->eraseFromParent();
2869 
2870     return eraseInstFromFunction(MI);
2871   }
2872   return nullptr;
2873 }
2874 
2875 /// Move the call to free before a NULL test.
2876 ///
2877 /// Check if this free is accessed after its argument has been test
2878 /// against NULL (property 0).
2879 /// If yes, it is legal to move this call in its predecessor block.
2880 ///
2881 /// The move is performed only if the block containing the call to free
2882 /// will be removed, i.e.:
2883 /// 1. it has only one predecessor P, and P has two successors
2884 /// 2. it contains the call, noops, and an unconditional branch
2885 /// 3. its successor is the same as its predecessor's successor
2886 ///
2887 /// The profitability is out-of concern here and this function should
2888 /// be called only if the caller knows this transformation would be
2889 /// profitable (e.g., for code size).
2890 static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
2891                                                 const DataLayout &DL) {
2892   Value *Op = FI.getArgOperand(0);
2893   BasicBlock *FreeInstrBB = FI.getParent();
2894   BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
2895 
2896   // Validate part of constraint #1: Only one predecessor
2897   // FIXME: We can extend the number of predecessor, but in that case, we
2898   //        would duplicate the call to free in each predecessor and it may
2899   //        not be profitable even for code size.
2900   if (!PredBB)
2901     return nullptr;
2902 
2903   // Validate constraint #2: Does this block contains only the call to
2904   //                         free, noops, and an unconditional branch?
2905   BasicBlock *SuccBB;
2906   Instruction *FreeInstrBBTerminator = FreeInstrBB->getTerminator();
2907   if (!match(FreeInstrBBTerminator, m_UnconditionalBr(SuccBB)))
2908     return nullptr;
2909 
2910   // If there are only 2 instructions in the block, at this point,
2911   // this is the call to free and unconditional.
2912   // If there are more than 2 instructions, check that they are noops
2913   // i.e., they won't hurt the performance of the generated code.
2914   if (FreeInstrBB->size() != 2) {
2915     for (const Instruction &Inst : FreeInstrBB->instructionsWithoutDebug()) {
2916       if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
2917         continue;
2918       auto *Cast = dyn_cast<CastInst>(&Inst);
2919       if (!Cast || !Cast->isNoopCast(DL))
2920         return nullptr;
2921     }
2922   }
2923   // Validate the rest of constraint #1 by matching on the pred branch.
2924   Instruction *TI = PredBB->getTerminator();
2925   BasicBlock *TrueBB, *FalseBB;
2926   ICmpInst::Predicate Pred;
2927   if (!match(TI, m_Br(m_ICmp(Pred,
2928                              m_CombineOr(m_Specific(Op),
2929                                          m_Specific(Op->stripPointerCasts())),
2930                              m_Zero()),
2931                       TrueBB, FalseBB)))
2932     return nullptr;
2933   if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
2934     return nullptr;
2935 
2936   // Validate constraint #3: Ensure the null case just falls through.
2937   if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
2938     return nullptr;
2939   assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
2940          "Broken CFG: missing edge from predecessor to successor");
2941 
2942   // At this point, we know that everything in FreeInstrBB can be moved
2943   // before TI.
2944   for (Instruction &Instr : llvm::make_early_inc_range(*FreeInstrBB)) {
2945     if (&Instr == FreeInstrBBTerminator)
2946       break;
2947     Instr.moveBefore(TI);
2948   }
2949   assert(FreeInstrBB->size() == 1 &&
2950          "Only the branch instruction should remain");
2951 
2952   // Now that we've moved the call to free before the NULL check, we have to
2953   // remove any attributes on its parameter that imply it's non-null, because
2954   // those attributes might have only been valid because of the NULL check, and
2955   // we can get miscompiles if we keep them. This is conservative if non-null is
2956   // also implied by something other than the NULL check, but it's guaranteed to
2957   // be correct, and the conservativeness won't matter in practice, since the
2958   // attributes are irrelevant for the call to free itself and the pointer
2959   // shouldn't be used after the call.
2960   AttributeList Attrs = FI.getAttributes();
2961   Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
2962   Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
2963   if (Dereferenceable.isValid()) {
2964     uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
2965     Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
2966                                        Attribute::Dereferenceable);
2967     Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
2968   }
2969   FI.setAttributes(Attrs);
2970 
2971   return &FI;
2972 }
2973 
2974 Instruction *InstCombinerImpl::visitFree(CallInst &FI) {
2975   Value *Op = FI.getArgOperand(0);
2976 
2977   // free undef -> unreachable.
2978   if (isa<UndefValue>(Op)) {
2979     // Leave a marker since we can't modify the CFG here.
2980     CreateNonTerminatorUnreachable(&FI);
2981     return eraseInstFromFunction(FI);
2982   }
2983 
2984   // If we have 'free null' delete the instruction.  This can happen in stl code
2985   // when lots of inlining happens.
2986   if (isa<ConstantPointerNull>(Op))
2987     return eraseInstFromFunction(FI);
2988 
2989   // If we had free(realloc(...)) with no intervening uses, then eliminate the
2990   // realloc() entirely.
2991   if (CallInst *CI = dyn_cast<CallInst>(Op)) {
2992     if (CI->hasOneUse() && isReallocLikeFn(CI, &TLI)) {
2993       return eraseInstFromFunction(
2994           *replaceInstUsesWith(*CI, CI->getOperand(0)));
2995     }
2996   }
2997 
2998   // If we optimize for code size, try to move the call to free before the null
2999   // test so that simplify cfg can remove the empty block and dead code
3000   // elimination the branch. I.e., helps to turn something like:
3001   // if (foo) free(foo);
3002   // into
3003   // free(foo);
3004   //
3005   // Note that we can only do this for 'free' and not for any flavor of
3006   // 'operator delete'; there is no 'operator delete' symbol for which we are
3007   // permitted to invent a call, even if we're passing in a null pointer.
3008   if (MinimizeSize) {
3009     LibFunc Func;
3010     if (TLI.getLibFunc(FI, Func) && TLI.has(Func) && Func == LibFunc_free)
3011       if (Instruction *I = tryToMoveFreeBeforeNullTest(FI, DL))
3012         return I;
3013   }
3014 
3015   return nullptr;
3016 }
3017 
3018 static bool isMustTailCall(Value *V) {
3019   if (auto *CI = dyn_cast<CallInst>(V))
3020     return CI->isMustTailCall();
3021   return false;
3022 }
3023 
3024 Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
3025   if (RI.getNumOperands() == 0) // ret void
3026     return nullptr;
3027 
3028   Value *ResultOp = RI.getOperand(0);
3029   Type *VTy = ResultOp->getType();
3030   if (!VTy->isIntegerTy() || isa<Constant>(ResultOp))
3031     return nullptr;
3032 
3033   // Don't replace result of musttail calls.
3034   if (isMustTailCall(ResultOp))
3035     return nullptr;
3036 
3037   // There might be assume intrinsics dominating this return that completely
3038   // determine the value. If so, constant fold it.
3039   KnownBits Known = computeKnownBits(ResultOp, 0, &RI);
3040   if (Known.isConstant())
3041     return replaceOperand(RI, 0,
3042         Constant::getIntegerValue(VTy, Known.getConstant()));
3043 
3044   return nullptr;
3045 }
3046 
3047 // WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
3048 Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {
3049   // Try to remove the previous instruction if it must lead to unreachable.
3050   // This includes instructions like stores and "llvm.assume" that may not get
3051   // removed by simple dead code elimination.
3052   while (Instruction *Prev = I.getPrevNonDebugInstruction()) {
3053     // While we theoretically can erase EH, that would result in a block that
3054     // used to start with an EH no longer starting with EH, which is invalid.
3055     // To make it valid, we'd need to fixup predecessors to no longer refer to
3056     // this block, but that changes CFG, which is not allowed in InstCombine.
3057     if (Prev->isEHPad())
3058       return nullptr; // Can not drop any more instructions. We're done here.
3059 
3060     if (!isGuaranteedToTransferExecutionToSuccessor(Prev))
3061       return nullptr; // Can not drop any more instructions. We're done here.
3062     // Otherwise, this instruction can be freely erased,
3063     // even if it is not side-effect free.
3064 
3065     // A value may still have uses before we process it here (for example, in
3066     // another unreachable block), so convert those to poison.
3067     replaceInstUsesWith(*Prev, PoisonValue::get(Prev->getType()));
3068     eraseInstFromFunction(*Prev);
3069   }
3070   assert(I.getParent()->sizeWithoutDebug() == 1 && "The block is now empty.");
3071   // FIXME: recurse into unconditional predecessors?
3072   return nullptr;
3073 }
3074 
3075 Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {
3076   assert(BI.isUnconditional() && "Only for unconditional branches.");
3077 
3078   // If this store is the second-to-last instruction in the basic block
3079   // (excluding debug info and bitcasts of pointers) and if the block ends with
3080   // an unconditional branch, try to move the store to the successor block.
3081 
3082   auto GetLastSinkableStore = [](BasicBlock::iterator BBI) {
3083     auto IsNoopInstrForStoreMerging = [](BasicBlock::iterator BBI) {
3084       return BBI->isDebugOrPseudoInst() ||
3085              (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
3086     };
3087 
3088     BasicBlock::iterator FirstInstr = BBI->getParent()->begin();
3089     do {
3090       if (BBI != FirstInstr)
3091         --BBI;
3092     } while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));
3093 
3094     return dyn_cast<StoreInst>(BBI);
3095   };
3096 
3097   if (StoreInst *SI = GetLastSinkableStore(BasicBlock::iterator(BI)))
3098     if (mergeStoreIntoSuccessor(*SI))
3099       return &BI;
3100 
3101   return nullptr;
3102 }
3103 
3104 Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {
3105   if (BI.isUnconditional())
3106     return visitUnconditionalBranchInst(BI);
3107 
3108   // Change br (not X), label True, label False to: br X, label False, True
3109   Value *X = nullptr;
3110   if (match(&BI, m_Br(m_Not(m_Value(X)), m_BasicBlock(), m_BasicBlock())) &&
3111       !isa<Constant>(X)) {
3112     // Swap Destinations and condition...
3113     BI.swapSuccessors();
3114     return replaceOperand(BI, 0, X);
3115   }
3116 
3117   // If the condition is irrelevant, remove the use so that other
3118   // transforms on the condition become more effective.
3119   if (!isa<ConstantInt>(BI.getCondition()) &&
3120       BI.getSuccessor(0) == BI.getSuccessor(1))
3121     return replaceOperand(
3122         BI, 0, ConstantInt::getFalse(BI.getCondition()->getType()));
3123 
3124   // Canonicalize, for example, fcmp_one -> fcmp_oeq.
3125   CmpInst::Predicate Pred;
3126   if (match(&BI, m_Br(m_OneUse(m_FCmp(Pred, m_Value(), m_Value())),
3127                       m_BasicBlock(), m_BasicBlock())) &&
3128       !isCanonicalPredicate(Pred)) {
3129     // Swap destinations and condition.
3130     CmpInst *Cond = cast<CmpInst>(BI.getCondition());
3131     Cond->setPredicate(CmpInst::getInversePredicate(Pred));
3132     BI.swapSuccessors();
3133     Worklist.push(Cond);
3134     return &BI;
3135   }
3136 
3137   return nullptr;
3138 }
3139 
3140 Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {
3141   Value *Cond = SI.getCondition();
3142   Value *Op0;
3143   ConstantInt *AddRHS;
3144   if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
3145     // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
3146     for (auto Case : SI.cases()) {
3147       Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
3148       assert(isa<ConstantInt>(NewCase) &&
3149              "Result of expression should be constant");
3150       Case.setValue(cast<ConstantInt>(NewCase));
3151     }
3152     return replaceOperand(SI, 0, Op0);
3153   }
3154 
3155   KnownBits Known = computeKnownBits(Cond, 0, &SI);
3156   unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
3157   unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
3158 
3159   // Compute the number of leading bits we can ignore.
3160   // TODO: A better way to determine this would use ComputeNumSignBits().
3161   for (auto &C : SI.cases()) {
3162     LeadingKnownZeros = std::min(
3163         LeadingKnownZeros, C.getCaseValue()->getValue().countLeadingZeros());
3164     LeadingKnownOnes = std::min(
3165         LeadingKnownOnes, C.getCaseValue()->getValue().countLeadingOnes());
3166   }
3167 
3168   unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
3169 
3170   // Shrink the condition operand if the new type is smaller than the old type.
3171   // But do not shrink to a non-standard type, because backend can't generate
3172   // good code for that yet.
3173   // TODO: We can make it aggressive again after fixing PR39569.
3174   if (NewWidth > 0 && NewWidth < Known.getBitWidth() &&
3175       shouldChangeType(Known.getBitWidth(), NewWidth)) {
3176     IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
3177     Builder.SetInsertPoint(&SI);
3178     Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
3179 
3180     for (auto Case : SI.cases()) {
3181       APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
3182       Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3183     }
3184     return replaceOperand(SI, 0, NewCond);
3185   }
3186 
3187   return nullptr;
3188 }
3189 
3190 Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
3191   Value *Agg = EV.getAggregateOperand();
3192 
3193   if (!EV.hasIndices())
3194     return replaceInstUsesWith(EV, Agg);
3195 
3196   if (Value *V = SimplifyExtractValueInst(Agg, EV.getIndices(),
3197                                           SQ.getWithInstruction(&EV)))
3198     return replaceInstUsesWith(EV, V);
3199 
3200   if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
3201     // We're extracting from an insertvalue instruction, compare the indices
3202     const unsigned *exti, *exte, *insi, *inse;
3203     for (exti = EV.idx_begin(), insi = IV->idx_begin(),
3204          exte = EV.idx_end(), inse = IV->idx_end();
3205          exti != exte && insi != inse;
3206          ++exti, ++insi) {
3207       if (*insi != *exti)
3208         // The insert and extract both reference distinctly different elements.
3209         // This means the extract is not influenced by the insert, and we can
3210         // replace the aggregate operand of the extract with the aggregate
3211         // operand of the insert. i.e., replace
3212         // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
3213         // %E = extractvalue { i32, { i32 } } %I, 0
3214         // with
3215         // %E = extractvalue { i32, { i32 } } %A, 0
3216         return ExtractValueInst::Create(IV->getAggregateOperand(),
3217                                         EV.getIndices());
3218     }
3219     if (exti == exte && insi == inse)
3220       // Both iterators are at the end: Index lists are identical. Replace
3221       // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
3222       // %C = extractvalue { i32, { i32 } } %B, 1, 0
3223       // with "i32 42"
3224       return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
3225     if (exti == exte) {
3226       // The extract list is a prefix of the insert list. i.e. replace
3227       // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
3228       // %E = extractvalue { i32, { i32 } } %I, 1
3229       // with
3230       // %X = extractvalue { i32, { i32 } } %A, 1
3231       // %E = insertvalue { i32 } %X, i32 42, 0
3232       // by switching the order of the insert and extract (though the
3233       // insertvalue should be left in, since it may have other uses).
3234       Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
3235                                                 EV.getIndices());
3236       return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
3237                                      makeArrayRef(insi, inse));
3238     }
3239     if (insi == inse)
3240       // The insert list is a prefix of the extract list
3241       // We can simply remove the common indices from the extract and make it
3242       // operate on the inserted value instead of the insertvalue result.
3243       // i.e., replace
3244       // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
3245       // %E = extractvalue { i32, { i32 } } %I, 1, 0
3246       // with
3247       // %E extractvalue { i32 } { i32 42 }, 0
3248       return ExtractValueInst::Create(IV->getInsertedValueOperand(),
3249                                       makeArrayRef(exti, exte));
3250   }
3251   if (WithOverflowInst *WO = dyn_cast<WithOverflowInst>(Agg)) {
3252     // We're extracting from an overflow intrinsic, see if we're the only user,
3253     // which allows us to simplify multiple result intrinsics to simpler
3254     // things that just get one value.
3255     if (WO->hasOneUse()) {
3256       // Check if we're grabbing only the result of a 'with overflow' intrinsic
3257       // and replace it with a traditional binary instruction.
3258       if (*EV.idx_begin() == 0) {
3259         Instruction::BinaryOps BinOp = WO->getBinaryOp();
3260         Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
3261         // Replace the old instruction's uses with poison.
3262         replaceInstUsesWith(*WO, PoisonValue::get(WO->getType()));
3263         eraseInstFromFunction(*WO);
3264         return BinaryOperator::Create(BinOp, LHS, RHS);
3265       }
3266 
3267       assert(*EV.idx_begin() == 1 &&
3268              "unexpected extract index for overflow inst");
3269 
3270       // If only the overflow result is used, and the right hand side is a
3271       // constant (or constant splat), we can remove the intrinsic by directly
3272       // checking for overflow.
3273       const APInt *C;
3274       if (match(WO->getRHS(), m_APInt(C))) {
3275         // Compute the no-wrap range for LHS given RHS=C, then construct an
3276         // equivalent icmp, potentially using an offset.
3277         ConstantRange NWR =
3278           ConstantRange::makeExactNoWrapRegion(WO->getBinaryOp(), *C,
3279                                                WO->getNoWrapKind());
3280 
3281         CmpInst::Predicate Pred;
3282         APInt NewRHSC, Offset;
3283         NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
3284         auto *OpTy = WO->getRHS()->getType();
3285         auto *NewLHS = WO->getLHS();
3286         if (Offset != 0)
3287           NewLHS = Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy, Offset));
3288         return new ICmpInst(ICmpInst::getInversePredicate(Pred), NewLHS,
3289                             ConstantInt::get(OpTy, NewRHSC));
3290       }
3291     }
3292   }
3293   if (LoadInst *L = dyn_cast<LoadInst>(Agg))
3294     // If the (non-volatile) load only has one use, we can rewrite this to a
3295     // load from a GEP. This reduces the size of the load. If a load is used
3296     // only by extractvalue instructions then this either must have been
3297     // optimized before, or it is a struct with padding, in which case we
3298     // don't want to do the transformation as it loses padding knowledge.
3299     if (L->isSimple() && L->hasOneUse()) {
3300       // extractvalue has integer indices, getelementptr has Value*s. Convert.
3301       SmallVector<Value*, 4> Indices;
3302       // Prefix an i32 0 since we need the first element.
3303       Indices.push_back(Builder.getInt32(0));
3304       for (unsigned Idx : EV.indices())
3305         Indices.push_back(Builder.getInt32(Idx));
3306 
3307       // We need to insert these at the location of the old load, not at that of
3308       // the extractvalue.
3309       Builder.SetInsertPoint(L);
3310       Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
3311                                              L->getPointerOperand(), Indices);
3312       Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
3313       // Whatever aliasing information we had for the orignal load must also
3314       // hold for the smaller load, so propagate the annotations.
3315       NL->setAAMetadata(L->getAAMetadata());
3316       // Returning the load directly will cause the main loop to insert it in
3317       // the wrong spot, so use replaceInstUsesWith().
3318       return replaceInstUsesWith(EV, NL);
3319     }
3320   // We could simplify extracts from other values. Note that nested extracts may
3321   // already be simplified implicitly by the above: extract (extract (insert) )
3322   // will be translated into extract ( insert ( extract ) ) first and then just
3323   // the value inserted, if appropriate. Similarly for extracts from single-use
3324   // loads: extract (extract (load)) will be translated to extract (load (gep))
3325   // and if again single-use then via load (gep (gep)) to load (gep).
3326   // However, double extracts from e.g. function arguments or return values
3327   // aren't handled yet.
3328   return nullptr;
3329 }
3330 
3331 /// Return 'true' if the given typeinfo will match anything.
3332 static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
3333   switch (Personality) {
3334   case EHPersonality::GNU_C:
3335   case EHPersonality::GNU_C_SjLj:
3336   case EHPersonality::Rust:
3337     // The GCC C EH and Rust personality only exists to support cleanups, so
3338     // it's not clear what the semantics of catch clauses are.
3339     return false;
3340   case EHPersonality::Unknown:
3341     return false;
3342   case EHPersonality::GNU_Ada:
3343     // While __gnat_all_others_value will match any Ada exception, it doesn't
3344     // match foreign exceptions (or didn't, before gcc-4.7).
3345     return false;
3346   case EHPersonality::GNU_CXX:
3347   case EHPersonality::GNU_CXX_SjLj:
3348   case EHPersonality::GNU_ObjC:
3349   case EHPersonality::MSVC_X86SEH:
3350   case EHPersonality::MSVC_TableSEH:
3351   case EHPersonality::MSVC_CXX:
3352   case EHPersonality::CoreCLR:
3353   case EHPersonality::Wasm_CXX:
3354   case EHPersonality::XL_CXX:
3355     return TypeInfo->isNullValue();
3356   }
3357   llvm_unreachable("invalid enum");
3358 }
3359 
3360 static bool shorter_filter(const Value *LHS, const Value *RHS) {
3361   return
3362     cast<ArrayType>(LHS->getType())->getNumElements()
3363   <
3364     cast<ArrayType>(RHS->getType())->getNumElements();
3365 }
3366 
3367 Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {
3368   // The logic here should be correct for any real-world personality function.
3369   // However if that turns out not to be true, the offending logic can always
3370   // be conditioned on the personality function, like the catch-all logic is.
3371   EHPersonality Personality =
3372       classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
3373 
3374   // Simplify the list of clauses, eg by removing repeated catch clauses
3375   // (these are often created by inlining).
3376   bool MakeNewInstruction = false; // If true, recreate using the following:
3377   SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
3378   bool CleanupFlag = LI.isCleanup();   // - The new instruction is a cleanup.
3379 
3380   SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
3381   for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
3382     bool isLastClause = i + 1 == e;
3383     if (LI.isCatch(i)) {
3384       // A catch clause.
3385       Constant *CatchClause = LI.getClause(i);
3386       Constant *TypeInfo = CatchClause->stripPointerCasts();
3387 
3388       // If we already saw this clause, there is no point in having a second
3389       // copy of it.
3390       if (AlreadyCaught.insert(TypeInfo).second) {
3391         // This catch clause was not already seen.
3392         NewClauses.push_back(CatchClause);
3393       } else {
3394         // Repeated catch clause - drop the redundant copy.
3395         MakeNewInstruction = true;
3396       }
3397 
3398       // If this is a catch-all then there is no point in keeping any following
3399       // clauses or marking the landingpad as having a cleanup.
3400       if (isCatchAll(Personality, TypeInfo)) {
3401         if (!isLastClause)
3402           MakeNewInstruction = true;
3403         CleanupFlag = false;
3404         break;
3405       }
3406     } else {
3407       // A filter clause.  If any of the filter elements were already caught
3408       // then they can be dropped from the filter.  It is tempting to try to
3409       // exploit the filter further by saying that any typeinfo that does not
3410       // occur in the filter can't be caught later (and thus can be dropped).
3411       // However this would be wrong, since typeinfos can match without being
3412       // equal (for example if one represents a C++ class, and the other some
3413       // class derived from it).
3414       assert(LI.isFilter(i) && "Unsupported landingpad clause!");
3415       Constant *FilterClause = LI.getClause(i);
3416       ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
3417       unsigned NumTypeInfos = FilterType->getNumElements();
3418 
3419       // An empty filter catches everything, so there is no point in keeping any
3420       // following clauses or marking the landingpad as having a cleanup.  By
3421       // dealing with this case here the following code is made a bit simpler.
3422       if (!NumTypeInfos) {
3423         NewClauses.push_back(FilterClause);
3424         if (!isLastClause)
3425           MakeNewInstruction = true;
3426         CleanupFlag = false;
3427         break;
3428       }
3429 
3430       bool MakeNewFilter = false; // If true, make a new filter.
3431       SmallVector<Constant *, 16> NewFilterElts; // New elements.
3432       if (isa<ConstantAggregateZero>(FilterClause)) {
3433         // Not an empty filter - it contains at least one null typeinfo.
3434         assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
3435         Constant *TypeInfo =
3436           Constant::getNullValue(FilterType->getElementType());
3437         // If this typeinfo is a catch-all then the filter can never match.
3438         if (isCatchAll(Personality, TypeInfo)) {
3439           // Throw the filter away.
3440           MakeNewInstruction = true;
3441           continue;
3442         }
3443 
3444         // There is no point in having multiple copies of this typeinfo, so
3445         // discard all but the first copy if there is more than one.
3446         NewFilterElts.push_back(TypeInfo);
3447         if (NumTypeInfos > 1)
3448           MakeNewFilter = true;
3449       } else {
3450         ConstantArray *Filter = cast<ConstantArray>(FilterClause);
3451         SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
3452         NewFilterElts.reserve(NumTypeInfos);
3453 
3454         // Remove any filter elements that were already caught or that already
3455         // occurred in the filter.  While there, see if any of the elements are
3456         // catch-alls.  If so, the filter can be discarded.
3457         bool SawCatchAll = false;
3458         for (unsigned j = 0; j != NumTypeInfos; ++j) {
3459           Constant *Elt = Filter->getOperand(j);
3460           Constant *TypeInfo = Elt->stripPointerCasts();
3461           if (isCatchAll(Personality, TypeInfo)) {
3462             // This element is a catch-all.  Bail out, noting this fact.
3463             SawCatchAll = true;
3464             break;
3465           }
3466 
3467           // Even if we've seen a type in a catch clause, we don't want to
3468           // remove it from the filter.  An unexpected type handler may be
3469           // set up for a call site which throws an exception of the same
3470           // type caught.  In order for the exception thrown by the unexpected
3471           // handler to propagate correctly, the filter must be correctly
3472           // described for the call site.
3473           //
3474           // Example:
3475           //
3476           // void unexpected() { throw 1;}
3477           // void foo() throw (int) {
3478           //   std::set_unexpected(unexpected);
3479           //   try {
3480           //     throw 2.0;
3481           //   } catch (int i) {}
3482           // }
3483 
3484           // There is no point in having multiple copies of the same typeinfo in
3485           // a filter, so only add it if we didn't already.
3486           if (SeenInFilter.insert(TypeInfo).second)
3487             NewFilterElts.push_back(cast<Constant>(Elt));
3488         }
3489         // A filter containing a catch-all cannot match anything by definition.
3490         if (SawCatchAll) {
3491           // Throw the filter away.
3492           MakeNewInstruction = true;
3493           continue;
3494         }
3495 
3496         // If we dropped something from the filter, make a new one.
3497         if (NewFilterElts.size() < NumTypeInfos)
3498           MakeNewFilter = true;
3499       }
3500       if (MakeNewFilter) {
3501         FilterType = ArrayType::get(FilterType->getElementType(),
3502                                     NewFilterElts.size());
3503         FilterClause = ConstantArray::get(FilterType, NewFilterElts);
3504         MakeNewInstruction = true;
3505       }
3506 
3507       NewClauses.push_back(FilterClause);
3508 
3509       // If the new filter is empty then it will catch everything so there is
3510       // no point in keeping any following clauses or marking the landingpad
3511       // as having a cleanup.  The case of the original filter being empty was
3512       // already handled above.
3513       if (MakeNewFilter && !NewFilterElts.size()) {
3514         assert(MakeNewInstruction && "New filter but not a new instruction!");
3515         CleanupFlag = false;
3516         break;
3517       }
3518     }
3519   }
3520 
3521   // If several filters occur in a row then reorder them so that the shortest
3522   // filters come first (those with the smallest number of elements).  This is
3523   // advantageous because shorter filters are more likely to match, speeding up
3524   // unwinding, but mostly because it increases the effectiveness of the other
3525   // filter optimizations below.
3526   for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
3527     unsigned j;
3528     // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
3529     for (j = i; j != e; ++j)
3530       if (!isa<ArrayType>(NewClauses[j]->getType()))
3531         break;
3532 
3533     // Check whether the filters are already sorted by length.  We need to know
3534     // if sorting them is actually going to do anything so that we only make a
3535     // new landingpad instruction if it does.
3536     for (unsigned k = i; k + 1 < j; ++k)
3537       if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
3538         // Not sorted, so sort the filters now.  Doing an unstable sort would be
3539         // correct too but reordering filters pointlessly might confuse users.
3540         std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
3541                          shorter_filter);
3542         MakeNewInstruction = true;
3543         break;
3544       }
3545 
3546     // Look for the next batch of filters.
3547     i = j + 1;
3548   }
3549 
3550   // If typeinfos matched if and only if equal, then the elements of a filter L
3551   // that occurs later than a filter F could be replaced by the intersection of
3552   // the elements of F and L.  In reality two typeinfos can match without being
3553   // equal (for example if one represents a C++ class, and the other some class
3554   // derived from it) so it would be wrong to perform this transform in general.
3555   // However the transform is correct and useful if F is a subset of L.  In that
3556   // case L can be replaced by F, and thus removed altogether since repeating a
3557   // filter is pointless.  So here we look at all pairs of filters F and L where
3558   // L follows F in the list of clauses, and remove L if every element of F is
3559   // an element of L.  This can occur when inlining C++ functions with exception
3560   // specifications.
3561   for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
3562     // Examine each filter in turn.
3563     Value *Filter = NewClauses[i];
3564     ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
3565     if (!FTy)
3566       // Not a filter - skip it.
3567       continue;
3568     unsigned FElts = FTy->getNumElements();
3569     // Examine each filter following this one.  Doing this backwards means that
3570     // we don't have to worry about filters disappearing under us when removed.
3571     for (unsigned j = NewClauses.size() - 1; j != i; --j) {
3572       Value *LFilter = NewClauses[j];
3573       ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
3574       if (!LTy)
3575         // Not a filter - skip it.
3576         continue;
3577       // If Filter is a subset of LFilter, i.e. every element of Filter is also
3578       // an element of LFilter, then discard LFilter.
3579       SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
3580       // If Filter is empty then it is a subset of LFilter.
3581       if (!FElts) {
3582         // Discard LFilter.
3583         NewClauses.erase(J);
3584         MakeNewInstruction = true;
3585         // Move on to the next filter.
3586         continue;
3587       }
3588       unsigned LElts = LTy->getNumElements();
3589       // If Filter is longer than LFilter then it cannot be a subset of it.
3590       if (FElts > LElts)
3591         // Move on to the next filter.
3592         continue;
3593       // At this point we know that LFilter has at least one element.
3594       if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
3595         // Filter is a subset of LFilter iff Filter contains only zeros (as we
3596         // already know that Filter is not longer than LFilter).
3597         if (isa<ConstantAggregateZero>(Filter)) {
3598           assert(FElts <= LElts && "Should have handled this case earlier!");
3599           // Discard LFilter.
3600           NewClauses.erase(J);
3601           MakeNewInstruction = true;
3602         }
3603         // Move on to the next filter.
3604         continue;
3605       }
3606       ConstantArray *LArray = cast<ConstantArray>(LFilter);
3607       if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
3608         // Since Filter is non-empty and contains only zeros, it is a subset of
3609         // LFilter iff LFilter contains a zero.
3610         assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
3611         for (unsigned l = 0; l != LElts; ++l)
3612           if (LArray->getOperand(l)->isNullValue()) {
3613             // LFilter contains a zero - discard it.
3614             NewClauses.erase(J);
3615             MakeNewInstruction = true;
3616             break;
3617           }
3618         // Move on to the next filter.
3619         continue;
3620       }
3621       // At this point we know that both filters are ConstantArrays.  Loop over
3622       // operands to see whether every element of Filter is also an element of
3623       // LFilter.  Since filters tend to be short this is probably faster than
3624       // using a method that scales nicely.
3625       ConstantArray *FArray = cast<ConstantArray>(Filter);
3626       bool AllFound = true;
3627       for (unsigned f = 0; f != FElts; ++f) {
3628         Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
3629         AllFound = false;
3630         for (unsigned l = 0; l != LElts; ++l) {
3631           Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
3632           if (LTypeInfo == FTypeInfo) {
3633             AllFound = true;
3634             break;
3635           }
3636         }
3637         if (!AllFound)
3638           break;
3639       }
3640       if (AllFound) {
3641         // Discard LFilter.
3642         NewClauses.erase(J);
3643         MakeNewInstruction = true;
3644       }
3645       // Move on to the next filter.
3646     }
3647   }
3648 
3649   // If we changed any of the clauses, replace the old landingpad instruction
3650   // with a new one.
3651   if (MakeNewInstruction) {
3652     LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
3653                                                  NewClauses.size());
3654     for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
3655       NLI->addClause(NewClauses[i]);
3656     // A landing pad with no clauses must have the cleanup flag set.  It is
3657     // theoretically possible, though highly unlikely, that we eliminated all
3658     // clauses.  If so, force the cleanup flag to true.
3659     if (NewClauses.empty())
3660       CleanupFlag = true;
3661     NLI->setCleanup(CleanupFlag);
3662     return NLI;
3663   }
3664 
3665   // Even if none of the clauses changed, we may nonetheless have understood
3666   // that the cleanup flag is pointless.  Clear it if so.
3667   if (LI.isCleanup() != CleanupFlag) {
3668     assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
3669     LI.setCleanup(CleanupFlag);
3670     return &LI;
3671   }
3672 
3673   return nullptr;
3674 }
3675 
3676 Value *
3677 InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {
3678   // Try to push freeze through instructions that propagate but don't produce
3679   // poison as far as possible.  If an operand of freeze follows three
3680   // conditions 1) one-use, 2) does not produce poison, and 3) has all but one
3681   // guaranteed-non-poison operands then push the freeze through to the one
3682   // operand that is not guaranteed non-poison.  The actual transform is as
3683   // follows.
3684   //   Op1 = ...                        ; Op1 can be posion
3685   //   Op0 = Inst(Op1, NonPoisonOps...) ; Op0 has only one use and only have
3686   //                                    ; single guaranteed-non-poison operands
3687   //   ... = Freeze(Op0)
3688   // =>
3689   //   Op1 = ...
3690   //   Op1.fr = Freeze(Op1)
3691   //   ... = Inst(Op1.fr, NonPoisonOps...)
3692   auto *OrigOp = OrigFI.getOperand(0);
3693   auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
3694 
3695   // While we could change the other users of OrigOp to use freeze(OrigOp), that
3696   // potentially reduces their optimization potential, so let's only do this iff
3697   // the OrigOp is only used by the freeze.
3698   if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
3699     return nullptr;
3700 
3701   // We can't push the freeze through an instruction which can itself create
3702   // poison.  If the only source of new poison is flags, we can simply
3703   // strip them (since we know the only use is the freeze and nothing can
3704   // benefit from them.)
3705   if (canCreateUndefOrPoison(cast<Operator>(OrigOp), /*ConsiderFlags*/ false))
3706     return nullptr;
3707 
3708   // If operand is guaranteed not to be poison, there is no need to add freeze
3709   // to the operand. So we first find the operand that is not guaranteed to be
3710   // poison.
3711   Use *MaybePoisonOperand = nullptr;
3712   for (Use &U : OrigOpInst->operands()) {
3713     if (isGuaranteedNotToBeUndefOrPoison(U.get()))
3714       continue;
3715     if (!MaybePoisonOperand)
3716       MaybePoisonOperand = &U;
3717     else
3718       return nullptr;
3719   }
3720 
3721   OrigOpInst->dropPoisonGeneratingFlags();
3722 
3723   // If all operands are guaranteed to be non-poison, we can drop freeze.
3724   if (!MaybePoisonOperand)
3725     return OrigOp;
3726 
3727   auto *FrozenMaybePoisonOperand = new FreezeInst(
3728       MaybePoisonOperand->get(), MaybePoisonOperand->get()->getName() + ".fr");
3729 
3730   replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
3731   FrozenMaybePoisonOperand->insertBefore(OrigOpInst);
3732   return OrigOp;
3733 }
3734 
3735 bool InstCombinerImpl::freezeDominatedUses(FreezeInst &FI) {
3736   Value *Op = FI.getOperand(0);
3737 
3738   if (isa<Constant>(Op))
3739     return false;
3740 
3741   bool Changed = false;
3742   Op->replaceUsesWithIf(&FI, [&](Use &U) -> bool {
3743     bool Dominates = DT.dominates(&FI, U);
3744     Changed |= Dominates;
3745     return Dominates;
3746   });
3747 
3748   return Changed;
3749 }
3750 
3751 Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {
3752   Value *Op0 = I.getOperand(0);
3753 
3754   if (Value *V = SimplifyFreezeInst(Op0, SQ.getWithInstruction(&I)))
3755     return replaceInstUsesWith(I, V);
3756 
3757   // freeze (phi const, x) --> phi const, (freeze x)
3758   if (auto *PN = dyn_cast<PHINode>(Op0)) {
3759     if (Instruction *NV = foldOpIntoPhi(I, PN))
3760       return NV;
3761   }
3762 
3763   if (Value *NI = pushFreezeToPreventPoisonFromPropagating(I))
3764     return replaceInstUsesWith(I, NI);
3765 
3766   if (match(Op0, m_Undef())) {
3767     // If I is freeze(undef), see its uses and fold it to the best constant.
3768     // - or: pick -1
3769     // - select's condition: pick the value that leads to choosing a constant
3770     // - other ops: pick 0
3771     Constant *BestValue = nullptr;
3772     Constant *NullValue = Constant::getNullValue(I.getType());
3773     for (const auto *U : I.users()) {
3774       Constant *C = NullValue;
3775 
3776       if (match(U, m_Or(m_Value(), m_Value())))
3777         C = Constant::getAllOnesValue(I.getType());
3778       else if (const auto *SI = dyn_cast<SelectInst>(U)) {
3779         if (SI->getCondition() == &I) {
3780           APInt CondVal(1, isa<Constant>(SI->getFalseValue()) ? 0 : 1);
3781           C = Constant::getIntegerValue(I.getType(), CondVal);
3782         }
3783       }
3784 
3785       if (!BestValue)
3786         BestValue = C;
3787       else if (BestValue != C)
3788         BestValue = NullValue;
3789     }
3790 
3791     return replaceInstUsesWith(I, BestValue);
3792   }
3793 
3794   // Replace all dominated uses of Op to freeze(Op).
3795   if (freezeDominatedUses(I))
3796     return &I;
3797 
3798   return nullptr;
3799 }
3800 
3801 /// Check for case where the call writes to an otherwise dead alloca.  This
3802 /// shows up for unused out-params in idiomatic C/C++ code.   Note that this
3803 /// helper *only* analyzes the write; doesn't check any other legality aspect.
3804 static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {
3805   auto *CB = dyn_cast<CallBase>(I);
3806   if (!CB)
3807     // TODO: handle e.g. store to alloca here - only worth doing if we extend
3808     // to allow reload along used path as described below.  Otherwise, this
3809     // is simply a store to a dead allocation which will be removed.
3810     return false;
3811   Optional<MemoryLocation> Dest = MemoryLocation::getForDest(CB, TLI);
3812   if (!Dest)
3813     return false;
3814   auto *AI = dyn_cast<AllocaInst>(getUnderlyingObject(Dest->Ptr));
3815   if (!AI)
3816     // TODO: allow malloc?
3817     return false;
3818   // TODO: allow memory access dominated by move point?  Note that since AI
3819   // could have a reference to itself captured by the call, we would need to
3820   // account for cycles in doing so.
3821   SmallVector<const User *> AllocaUsers;
3822   SmallPtrSet<const User *, 4> Visited;
3823   auto pushUsers = [&](const Instruction &I) {
3824     for (const User *U : I.users()) {
3825       if (Visited.insert(U).second)
3826         AllocaUsers.push_back(U);
3827     }
3828   };
3829   pushUsers(*AI);
3830   while (!AllocaUsers.empty()) {
3831     auto *UserI = cast<Instruction>(AllocaUsers.pop_back_val());
3832     if (isa<BitCastInst>(UserI) || isa<GetElementPtrInst>(UserI) ||
3833         isa<AddrSpaceCastInst>(UserI)) {
3834       pushUsers(*UserI);
3835       continue;
3836     }
3837     if (UserI == CB)
3838       continue;
3839     // TODO: support lifetime.start/end here
3840     return false;
3841   }
3842   return true;
3843 }
3844 
3845 /// Try to move the specified instruction from its current block into the
3846 /// beginning of DestBlock, which can only happen if it's safe to move the
3847 /// instruction past all of the instructions between it and the end of its
3848 /// block.
3849 static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock,
3850                                  TargetLibraryInfo &TLI) {
3851   assert(I->getUniqueUndroppableUser() && "Invariants didn't hold!");
3852   BasicBlock *SrcBlock = I->getParent();
3853 
3854   // Cannot move control-flow-involving, volatile loads, vaarg, etc.
3855   if (isa<PHINode>(I) || I->isEHPad() || I->mayThrow() || !I->willReturn() ||
3856       I->isTerminator())
3857     return false;
3858 
3859   // Do not sink static or dynamic alloca instructions. Static allocas must
3860   // remain in the entry block, and dynamic allocas must not be sunk in between
3861   // a stacksave / stackrestore pair, which would incorrectly shorten its
3862   // lifetime.
3863   if (isa<AllocaInst>(I))
3864     return false;
3865 
3866   // Do not sink into catchswitch blocks.
3867   if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
3868     return false;
3869 
3870   // Do not sink convergent call instructions.
3871   if (auto *CI = dyn_cast<CallInst>(I)) {
3872     if (CI->isConvergent())
3873       return false;
3874   }
3875 
3876   // Unless we can prove that the memory write isn't visibile except on the
3877   // path we're sinking to, we must bail.
3878   if (I->mayWriteToMemory()) {
3879     if (!SoleWriteToDeadLocal(I, TLI))
3880       return false;
3881   }
3882 
3883   // We can only sink load instructions if there is nothing between the load and
3884   // the end of block that could change the value.
3885   if (I->mayReadFromMemory()) {
3886     // We don't want to do any sophisticated alias analysis, so we only check
3887     // the instructions after I in I's parent block if we try to sink to its
3888     // successor block.
3889     if (DestBlock->getUniquePredecessor() != I->getParent())
3890       return false;
3891     for (BasicBlock::iterator Scan = std::next(I->getIterator()),
3892                               E = I->getParent()->end();
3893          Scan != E; ++Scan)
3894       if (Scan->mayWriteToMemory())
3895         return false;
3896   }
3897 
3898   I->dropDroppableUses([DestBlock](const Use *U) {
3899     if (auto *I = dyn_cast<Instruction>(U->getUser()))
3900       return I->getParent() != DestBlock;
3901     return true;
3902   });
3903   /// FIXME: We could remove droppable uses that are not dominated by
3904   /// the new position.
3905 
3906   BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
3907   I->moveBefore(&*InsertPos);
3908   ++NumSunkInst;
3909 
3910   // Also sink all related debug uses from the source basic block. Otherwise we
3911   // get debug use before the def. Attempt to salvage debug uses first, to
3912   // maximise the range variables have location for. If we cannot salvage, then
3913   // mark the location undef: we know it was supposed to receive a new location
3914   // here, but that computation has been sunk.
3915   SmallVector<DbgVariableIntrinsic *, 2> DbgUsers;
3916   findDbgUsers(DbgUsers, I);
3917   // Process the sinking DbgUsers in reverse order, as we only want to clone the
3918   // last appearing debug intrinsic for each given variable.
3919   SmallVector<DbgVariableIntrinsic *, 2> DbgUsersToSink;
3920   for (DbgVariableIntrinsic *DVI : DbgUsers)
3921     if (DVI->getParent() == SrcBlock)
3922       DbgUsersToSink.push_back(DVI);
3923   llvm::sort(DbgUsersToSink,
3924              [](auto *A, auto *B) { return B->comesBefore(A); });
3925 
3926   SmallVector<DbgVariableIntrinsic *, 2> DIIClones;
3927   SmallSet<DebugVariable, 4> SunkVariables;
3928   for (auto User : DbgUsersToSink) {
3929     // A dbg.declare instruction should not be cloned, since there can only be
3930     // one per variable fragment. It should be left in the original place
3931     // because the sunk instruction is not an alloca (otherwise we could not be
3932     // here).
3933     if (isa<DbgDeclareInst>(User))
3934       continue;
3935 
3936     DebugVariable DbgUserVariable =
3937         DebugVariable(User->getVariable(), User->getExpression(),
3938                       User->getDebugLoc()->getInlinedAt());
3939 
3940     if (!SunkVariables.insert(DbgUserVariable).second)
3941       continue;
3942 
3943     DIIClones.emplace_back(cast<DbgVariableIntrinsic>(User->clone()));
3944     if (isa<DbgDeclareInst>(User) && isa<CastInst>(I))
3945       DIIClones.back()->replaceVariableLocationOp(I, I->getOperand(0));
3946     LLVM_DEBUG(dbgs() << "CLONE: " << *DIIClones.back() << '\n');
3947   }
3948 
3949   // Perform salvaging without the clones, then sink the clones.
3950   if (!DIIClones.empty()) {
3951     salvageDebugInfoForDbgValues(*I, DbgUsers);
3952     // The clones are in reverse order of original appearance, reverse again to
3953     // maintain the original order.
3954     for (auto &DIIClone : llvm::reverse(DIIClones)) {
3955       DIIClone->insertBefore(&*InsertPos);
3956       LLVM_DEBUG(dbgs() << "SINK: " << *DIIClone << '\n');
3957     }
3958   }
3959 
3960   return true;
3961 }
3962 
3963 bool InstCombinerImpl::run() {
3964   while (!Worklist.isEmpty()) {
3965     // Walk deferred instructions in reverse order, and push them to the
3966     // worklist, which means they'll end up popped from the worklist in-order.
3967     while (Instruction *I = Worklist.popDeferred()) {
3968       // Check to see if we can DCE the instruction. We do this already here to
3969       // reduce the number of uses and thus allow other folds to trigger.
3970       // Note that eraseInstFromFunction() may push additional instructions on
3971       // the deferred worklist, so this will DCE whole instruction chains.
3972       if (isInstructionTriviallyDead(I, &TLI)) {
3973         eraseInstFromFunction(*I);
3974         ++NumDeadInst;
3975         continue;
3976       }
3977 
3978       Worklist.push(I);
3979     }
3980 
3981     Instruction *I = Worklist.removeOne();
3982     if (I == nullptr) continue;  // skip null values.
3983 
3984     // Check to see if we can DCE the instruction.
3985     if (isInstructionTriviallyDead(I, &TLI)) {
3986       eraseInstFromFunction(*I);
3987       ++NumDeadInst;
3988       continue;
3989     }
3990 
3991     if (!DebugCounter::shouldExecute(VisitCounter))
3992       continue;
3993 
3994     // Instruction isn't dead, see if we can constant propagate it.
3995     if (!I->use_empty() &&
3996         (I->getNumOperands() == 0 || isa<Constant>(I->getOperand(0)))) {
3997       if (Constant *C = ConstantFoldInstruction(I, DL, &TLI)) {
3998         LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I
3999                           << '\n');
4000 
4001         // Add operands to the worklist.
4002         replaceInstUsesWith(*I, C);
4003         ++NumConstProp;
4004         if (isInstructionTriviallyDead(I, &TLI))
4005           eraseInstFromFunction(*I);
4006         MadeIRChange = true;
4007         continue;
4008       }
4009     }
4010 
4011     // See if we can trivially sink this instruction to its user if we can
4012     // prove that the successor is not executed more frequently than our block.
4013     // Return the UserBlock if successful.
4014     auto getOptionalSinkBlockForInst =
4015         [this](Instruction *I) -> Optional<BasicBlock *> {
4016       if (!EnableCodeSinking)
4017         return None;
4018       auto *UserInst = cast_or_null<Instruction>(I->getUniqueUndroppableUser());
4019       if (!UserInst)
4020         return None;
4021 
4022       BasicBlock *BB = I->getParent();
4023       BasicBlock *UserParent = nullptr;
4024 
4025       // Special handling for Phi nodes - get the block the use occurs in.
4026       if (PHINode *PN = dyn_cast<PHINode>(UserInst)) {
4027         for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
4028           if (PN->getIncomingValue(i) == I) {
4029             // Bail out if we have uses in different blocks. We don't do any
4030             // sophisticated analysis (i.e finding NearestCommonDominator of these
4031             // use blocks).
4032             if (UserParent && UserParent != PN->getIncomingBlock(i))
4033               return None;
4034             UserParent = PN->getIncomingBlock(i);
4035           }
4036         }
4037         assert(UserParent && "expected to find user block!");
4038       } else
4039         UserParent = UserInst->getParent();
4040 
4041       // Try sinking to another block. If that block is unreachable, then do
4042       // not bother. SimplifyCFG should handle it.
4043       if (UserParent == BB || !DT.isReachableFromEntry(UserParent))
4044         return None;
4045 
4046       auto *Term = UserParent->getTerminator();
4047       // See if the user is one of our successors that has only one
4048       // predecessor, so that we don't have to split the critical edge.
4049       // Another option where we can sink is a block that ends with a
4050       // terminator that does not pass control to other block (such as
4051       // return or unreachable or resume). In this case:
4052       //   - I dominates the User (by SSA form);
4053       //   - the User will be executed at most once.
4054       // So sinking I down to User is always profitable or neutral.
4055       if (UserParent->getUniquePredecessor() == BB || succ_empty(Term)) {
4056         assert(DT.dominates(BB, UserParent) && "Dominance relation broken?");
4057         return UserParent;
4058       }
4059       return None;
4060     };
4061 
4062     auto OptBB = getOptionalSinkBlockForInst(I);
4063     if (OptBB) {
4064       auto *UserParent = *OptBB;
4065       // Okay, the CFG is simple enough, try to sink this instruction.
4066       if (TryToSinkInstruction(I, UserParent, TLI)) {
4067         LLVM_DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
4068         MadeIRChange = true;
4069         // We'll add uses of the sunk instruction below, but since
4070         // sinking can expose opportunities for it's *operands* add
4071         // them to the worklist
4072         for (Use &U : I->operands())
4073           if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
4074             Worklist.push(OpI);
4075       }
4076     }
4077 
4078     // Now that we have an instruction, try combining it to simplify it.
4079     Builder.SetInsertPoint(I);
4080     Builder.CollectMetadataToCopy(
4081         I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
4082 
4083 #ifndef NDEBUG
4084     std::string OrigI;
4085 #endif
4086     LLVM_DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
4087     LLVM_DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
4088 
4089     if (Instruction *Result = visit(*I)) {
4090       ++NumCombined;
4091       // Should we replace the old instruction with a new one?
4092       if (Result != I) {
4093         LLVM_DEBUG(dbgs() << "IC: Old = " << *I << '\n'
4094                           << "    New = " << *Result << '\n');
4095 
4096         Result->copyMetadata(*I,
4097                              {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
4098         // Everything uses the new instruction now.
4099         I->replaceAllUsesWith(Result);
4100 
4101         // Move the name to the new instruction first.
4102         Result->takeName(I);
4103 
4104         // Insert the new instruction into the basic block...
4105         BasicBlock *InstParent = I->getParent();
4106         BasicBlock::iterator InsertPos = I->getIterator();
4107 
4108         // Are we replace a PHI with something that isn't a PHI, or vice versa?
4109         if (isa<PHINode>(Result) != isa<PHINode>(I)) {
4110           // We need to fix up the insertion point.
4111           if (isa<PHINode>(I)) // PHI -> Non-PHI
4112             InsertPos = InstParent->getFirstInsertionPt();
4113           else // Non-PHI -> PHI
4114             InsertPos = InstParent->getFirstNonPHI()->getIterator();
4115         }
4116 
4117         InstParent->getInstList().insert(InsertPos, Result);
4118 
4119         // Push the new instruction and any users onto the worklist.
4120         Worklist.pushUsersToWorkList(*Result);
4121         Worklist.push(Result);
4122 
4123         eraseInstFromFunction(*I);
4124       } else {
4125         LLVM_DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
4126                           << "    New = " << *I << '\n');
4127 
4128         // If the instruction was modified, it's possible that it is now dead.
4129         // if so, remove it.
4130         if (isInstructionTriviallyDead(I, &TLI)) {
4131           eraseInstFromFunction(*I);
4132         } else {
4133           Worklist.pushUsersToWorkList(*I);
4134           Worklist.push(I);
4135         }
4136       }
4137       MadeIRChange = true;
4138     }
4139   }
4140 
4141   Worklist.zap();
4142   return MadeIRChange;
4143 }
4144 
4145 // Track the scopes used by !alias.scope and !noalias. In a function, a
4146 // @llvm.experimental.noalias.scope.decl is only useful if that scope is used
4147 // by both sets. If not, the declaration of the scope can be safely omitted.
4148 // The MDNode of the scope can be omitted as well for the instructions that are
4149 // part of this function. We do not do that at this point, as this might become
4150 // too time consuming to do.
4151 class AliasScopeTracker {
4152   SmallPtrSet<const MDNode *, 8> UsedAliasScopesAndLists;
4153   SmallPtrSet<const MDNode *, 8> UsedNoAliasScopesAndLists;
4154 
4155 public:
4156   void analyse(Instruction *I) {
4157     // This seems to be faster than checking 'mayReadOrWriteMemory()'.
4158     if (!I->hasMetadataOtherThanDebugLoc())
4159       return;
4160 
4161     auto Track = [](Metadata *ScopeList, auto &Container) {
4162       const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
4163       if (!MDScopeList || !Container.insert(MDScopeList).second)
4164         return;
4165       for (auto &MDOperand : MDScopeList->operands())
4166         if (auto *MDScope = dyn_cast<MDNode>(MDOperand))
4167           Container.insert(MDScope);
4168     };
4169 
4170     Track(I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
4171     Track(I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
4172   }
4173 
4174   bool isNoAliasScopeDeclDead(Instruction *Inst) {
4175     NoAliasScopeDeclInst *Decl = dyn_cast<NoAliasScopeDeclInst>(Inst);
4176     if (!Decl)
4177       return false;
4178 
4179     assert(Decl->use_empty() &&
4180            "llvm.experimental.noalias.scope.decl in use ?");
4181     const MDNode *MDSL = Decl->getScopeList();
4182     assert(MDSL->getNumOperands() == 1 &&
4183            "llvm.experimental.noalias.scope should refer to a single scope");
4184     auto &MDOperand = MDSL->getOperand(0);
4185     if (auto *MD = dyn_cast<MDNode>(MDOperand))
4186       return !UsedAliasScopesAndLists.contains(MD) ||
4187              !UsedNoAliasScopesAndLists.contains(MD);
4188 
4189     // Not an MDNode ? throw away.
4190     return true;
4191   }
4192 };
4193 
4194 /// Populate the IC worklist from a function, by walking it in depth-first
4195 /// order and adding all reachable code to the worklist.
4196 ///
4197 /// This has a couple of tricks to make the code faster and more powerful.  In
4198 /// particular, we constant fold and DCE instructions as we go, to avoid adding
4199 /// them to the worklist (this significantly speeds up instcombine on code where
4200 /// many instructions are dead or constant).  Additionally, if we find a branch
4201 /// whose condition is a known constant, we only visit the reachable successors.
4202 static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL,
4203                                           const TargetLibraryInfo *TLI,
4204                                           InstructionWorklist &ICWorklist) {
4205   bool MadeIRChange = false;
4206   SmallPtrSet<BasicBlock *, 32> Visited;
4207   SmallVector<BasicBlock*, 256> Worklist;
4208   Worklist.push_back(&F.front());
4209 
4210   SmallVector<Instruction *, 128> InstrsForInstructionWorklist;
4211   DenseMap<Constant *, Constant *> FoldedConstants;
4212   AliasScopeTracker SeenAliasScopes;
4213 
4214   do {
4215     BasicBlock *BB = Worklist.pop_back_val();
4216 
4217     // We have now visited this block!  If we've already been here, ignore it.
4218     if (!Visited.insert(BB).second)
4219       continue;
4220 
4221     for (Instruction &Inst : llvm::make_early_inc_range(*BB)) {
4222       // ConstantProp instruction if trivially constant.
4223       if (!Inst.use_empty() &&
4224           (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
4225         if (Constant *C = ConstantFoldInstruction(&Inst, DL, TLI)) {
4226           LLVM_DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << Inst
4227                             << '\n');
4228           Inst.replaceAllUsesWith(C);
4229           ++NumConstProp;
4230           if (isInstructionTriviallyDead(&Inst, TLI))
4231             Inst.eraseFromParent();
4232           MadeIRChange = true;
4233           continue;
4234         }
4235 
4236       // See if we can constant fold its operands.
4237       for (Use &U : Inst.operands()) {
4238         if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
4239           continue;
4240 
4241         auto *C = cast<Constant>(U);
4242         Constant *&FoldRes = FoldedConstants[C];
4243         if (!FoldRes)
4244           FoldRes = ConstantFoldConstant(C, DL, TLI);
4245 
4246         if (FoldRes != C) {
4247           LLVM_DEBUG(dbgs() << "IC: ConstFold operand of: " << Inst
4248                             << "\n    Old = " << *C
4249                             << "\n    New = " << *FoldRes << '\n');
4250           U = FoldRes;
4251           MadeIRChange = true;
4252         }
4253       }
4254 
4255       // Skip processing debug and pseudo intrinsics in InstCombine. Processing
4256       // these call instructions consumes non-trivial amount of time and
4257       // provides no value for the optimization.
4258       if (!Inst.isDebugOrPseudoInst()) {
4259         InstrsForInstructionWorklist.push_back(&Inst);
4260         SeenAliasScopes.analyse(&Inst);
4261       }
4262     }
4263 
4264     // Recursively visit successors.  If this is a branch or switch on a
4265     // constant, only visit the reachable successor.
4266     Instruction *TI = BB->getTerminator();
4267     if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
4268       if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
4269         bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
4270         BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
4271         Worklist.push_back(ReachableBB);
4272         continue;
4273       }
4274     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
4275       if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
4276         Worklist.push_back(SI->findCaseValue(Cond)->getCaseSuccessor());
4277         continue;
4278       }
4279     }
4280 
4281     append_range(Worklist, successors(TI));
4282   } while (!Worklist.empty());
4283 
4284   // Remove instructions inside unreachable blocks. This prevents the
4285   // instcombine code from having to deal with some bad special cases, and
4286   // reduces use counts of instructions.
4287   for (BasicBlock &BB : F) {
4288     if (Visited.count(&BB))
4289       continue;
4290 
4291     unsigned NumDeadInstInBB;
4292     unsigned NumDeadDbgInstInBB;
4293     std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
4294         removeAllNonTerminatorAndEHPadInstructions(&BB);
4295 
4296     MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
4297     NumDeadInst += NumDeadInstInBB;
4298   }
4299 
4300   // Once we've found all of the instructions to add to instcombine's worklist,
4301   // add them in reverse order.  This way instcombine will visit from the top
4302   // of the function down.  This jives well with the way that it adds all uses
4303   // of instructions to the worklist after doing a transformation, thus avoiding
4304   // some N^2 behavior in pathological cases.
4305   ICWorklist.reserve(InstrsForInstructionWorklist.size());
4306   for (Instruction *Inst : reverse(InstrsForInstructionWorklist)) {
4307     // DCE instruction if trivially dead. As we iterate in reverse program
4308     // order here, we will clean up whole chains of dead instructions.
4309     if (isInstructionTriviallyDead(Inst, TLI) ||
4310         SeenAliasScopes.isNoAliasScopeDeclDead(Inst)) {
4311       ++NumDeadInst;
4312       LLVM_DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
4313       salvageDebugInfo(*Inst);
4314       Inst->eraseFromParent();
4315       MadeIRChange = true;
4316       continue;
4317     }
4318 
4319     ICWorklist.push(Inst);
4320   }
4321 
4322   return MadeIRChange;
4323 }
4324 
4325 static bool combineInstructionsOverFunction(
4326     Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
4327     AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
4328     DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
4329     ProfileSummaryInfo *PSI, unsigned MaxIterations, LoopInfo *LI) {
4330   auto &DL = F.getParent()->getDataLayout();
4331   MaxIterations = std::min(MaxIterations, LimitMaxIterations.getValue());
4332 
4333   /// Builder - This is an IRBuilder that automatically inserts new
4334   /// instructions into the worklist when they are created.
4335   IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
4336       F.getContext(), TargetFolder(DL),
4337       IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
4338         Worklist.add(I);
4339         if (auto *Assume = dyn_cast<AssumeInst>(I))
4340           AC.registerAssumption(Assume);
4341       }));
4342 
4343   // Lower dbg.declare intrinsics otherwise their value may be clobbered
4344   // by instcombiner.
4345   bool MadeIRChange = false;
4346   if (ShouldLowerDbgDeclare)
4347     MadeIRChange = LowerDbgDeclare(F);
4348 
4349   // Iterate while there is work to do.
4350   unsigned Iteration = 0;
4351   while (true) {
4352     ++NumWorklistIterations;
4353     ++Iteration;
4354 
4355     if (Iteration > InfiniteLoopDetectionThreshold) {
4356       report_fatal_error(
4357           "Instruction Combining seems stuck in an infinite loop after " +
4358           Twine(InfiniteLoopDetectionThreshold) + " iterations.");
4359     }
4360 
4361     if (Iteration > MaxIterations) {
4362       LLVM_DEBUG(dbgs() << "\n\n[IC] Iteration limit #" << MaxIterations
4363                         << " on " << F.getName()
4364                         << " reached; stopping before reaching a fixpoint\n");
4365       break;
4366     }
4367 
4368     LLVM_DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
4369                       << F.getName() << "\n");
4370 
4371     MadeIRChange |= prepareICWorklistFromFunction(F, DL, &TLI, Worklist);
4372 
4373     InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT,
4374                         ORE, BFI, PSI, DL, LI);
4375     IC.MaxArraySizeForCombine = MaxArraySize;
4376 
4377     if (!IC.run())
4378       break;
4379 
4380     MadeIRChange = true;
4381   }
4382 
4383   return MadeIRChange;
4384 }
4385 
4386 InstCombinePass::InstCombinePass() : MaxIterations(LimitMaxIterations) {}
4387 
4388 InstCombinePass::InstCombinePass(unsigned MaxIterations)
4389     : MaxIterations(MaxIterations) {}
4390 
4391 PreservedAnalyses InstCombinePass::run(Function &F,
4392                                        FunctionAnalysisManager &AM) {
4393   auto &AC = AM.getResult<AssumptionAnalysis>(F);
4394   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
4395   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
4396   auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
4397   auto &TTI = AM.getResult<TargetIRAnalysis>(F);
4398 
4399   auto *LI = AM.getCachedResult<LoopAnalysis>(F);
4400 
4401   auto *AA = &AM.getResult<AAManager>(F);
4402   auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
4403   ProfileSummaryInfo *PSI =
4404       MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
4405   auto *BFI = (PSI && PSI->hasProfileSummary()) ?
4406       &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
4407 
4408   if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
4409                                        BFI, PSI, MaxIterations, LI))
4410     // No changes, all analyses are preserved.
4411     return PreservedAnalyses::all();
4412 
4413   // Mark all the analyses that instcombine updates as preserved.
4414   PreservedAnalyses PA;
4415   PA.preserveSet<CFGAnalyses>();
4416   return PA;
4417 }
4418 
4419 void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
4420   AU.setPreservesCFG();
4421   AU.addRequired<AAResultsWrapperPass>();
4422   AU.addRequired<AssumptionCacheTracker>();
4423   AU.addRequired<TargetLibraryInfoWrapperPass>();
4424   AU.addRequired<TargetTransformInfoWrapperPass>();
4425   AU.addRequired<DominatorTreeWrapperPass>();
4426   AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
4427   AU.addPreserved<DominatorTreeWrapperPass>();
4428   AU.addPreserved<AAResultsWrapperPass>();
4429   AU.addPreserved<BasicAAWrapperPass>();
4430   AU.addPreserved<GlobalsAAWrapperPass>();
4431   AU.addRequired<ProfileSummaryInfoWrapperPass>();
4432   LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
4433 }
4434 
4435 bool InstructionCombiningPass::runOnFunction(Function &F) {
4436   if (skipFunction(F))
4437     return false;
4438 
4439   // Required analyses.
4440   auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
4441   auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
4442   auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
4443   auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
4444   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
4445   auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
4446 
4447   // Optional analyses.
4448   auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
4449   auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
4450   ProfileSummaryInfo *PSI =
4451       &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
4452   BlockFrequencyInfo *BFI =
4453       (PSI && PSI->hasProfileSummary()) ?
4454       &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
4455       nullptr;
4456 
4457   return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
4458                                          BFI, PSI, MaxIterations, LI);
4459 }
4460 
4461 char InstructionCombiningPass::ID = 0;
4462 
4463 InstructionCombiningPass::InstructionCombiningPass()
4464     : FunctionPass(ID), MaxIterations(InstCombineDefaultMaxIterations) {
4465   initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
4466 }
4467 
4468 InstructionCombiningPass::InstructionCombiningPass(unsigned MaxIterations)
4469     : FunctionPass(ID), MaxIterations(MaxIterations) {
4470   initializeInstructionCombiningPassPass(*PassRegistry::getPassRegistry());
4471 }
4472 
4473 INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
4474                       "Combine redundant instructions", false, false)
4475 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
4476 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
4477 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
4478 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
4479 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
4480 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
4481 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
4482 INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
4483 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
4484 INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
4485                     "Combine redundant instructions", false, false)
4486 
4487 // Initialization Routines
4488 void llvm::initializeInstCombine(PassRegistry &Registry) {
4489   initializeInstructionCombiningPassPass(Registry);
4490 }
4491 
4492 void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
4493   initializeInstructionCombiningPassPass(*unwrap(R));
4494 }
4495 
4496 FunctionPass *llvm::createInstructionCombiningPass() {
4497   return new InstructionCombiningPass();
4498 }
4499 
4500 FunctionPass *llvm::createInstructionCombiningPass(unsigned MaxIterations) {
4501   return new InstructionCombiningPass(MaxIterations);
4502 }
4503 
4504 void LLVMAddInstructionCombiningPass(LLVMPassManagerRef PM) {
4505   unwrap(PM)->add(createInstructionCombiningPass());
4506 }
4507