1 //===- InstructionCombining.cpp - Combine multiple instructions -----------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // InstructionCombining - Combine instructions to form fewer, simple
11 // instructions.  This pass does not modify the CFG.  This pass is where
12 // algebraic simplification happens.
13 //
14 // This pass combines things like:
15 //    %Y = add i32 %X, 1
16 //    %Z = add i32 %Y, 1
17 // into:
18 //    %Z = add i32 %X, 2
19 //
20 // This is a simple worklist driven algorithm.
21 //
22 // This pass guarantees that the following canonicalizations are performed on
23 // the program:
24 //    1. If a binary operator has a constant operand, it is moved to the RHS
25 //    2. Bitwise operators with constant operands are always grouped so that
26 //       shifts are performed first, then or's, then and's, then xor's.
27 //    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
28 //    4. All cmp instructions on boolean values are replaced with logical ops
29 //    5. add X, X is represented as (X*2) => (X << 1)
30 //    6. Multiplies with a power-of-two constant argument are transformed into
31 //       shifts.
32 //   ... etc.
33 //
34 //===----------------------------------------------------------------------===//
35 
36 #include "InstCombineInternal.h"
37 #include "llvm-c/Initialization.h"
38 #include "llvm/ADT/SmallPtrSet.h"
39 #include "llvm/ADT/Statistic.h"
40 #include "llvm/ADT/StringSwitch.h"
41 #include "llvm/Analysis/AliasAnalysis.h"
42 #include "llvm/Analysis/AssumptionCache.h"
43 #include "llvm/Analysis/BasicAliasAnalysis.h"
44 #include "llvm/Analysis/CFG.h"
45 #include "llvm/Analysis/ConstantFolding.h"
46 #include "llvm/Analysis/EHPersonalities.h"
47 #include "llvm/Analysis/GlobalsModRef.h"
48 #include "llvm/Analysis/InstructionSimplify.h"
49 #include "llvm/Analysis/LoopInfo.h"
50 #include "llvm/Analysis/MemoryBuiltins.h"
51 #include "llvm/Analysis/TargetLibraryInfo.h"
52 #include "llvm/Analysis/ValueTracking.h"
53 #include "llvm/IR/CFG.h"
54 #include "llvm/IR/DataLayout.h"
55 #include "llvm/IR/Dominators.h"
56 #include "llvm/IR/GetElementPtrTypeIterator.h"
57 #include "llvm/IR/IntrinsicInst.h"
58 #include "llvm/IR/PatternMatch.h"
59 #include "llvm/IR/ValueHandle.h"
60 #include "llvm/Support/CommandLine.h"
61 #include "llvm/Support/Debug.h"
62 #include "llvm/Support/KnownBits.h"
63 #include "llvm/Support/raw_ostream.h"
64 #include "llvm/Transforms/InstCombine/InstCombine.h"
65 #include "llvm/Transforms/Scalar.h"
66 #include "llvm/Transforms/Utils/Local.h"
67 #include <algorithm>
68 #include <climits>
69 using namespace llvm;
70 using namespace llvm::PatternMatch;
71 
72 #define DEBUG_TYPE "instcombine"
73 
74 STATISTIC(NumCombined , "Number of insts combined");
75 STATISTIC(NumConstProp, "Number of constant folds");
76 STATISTIC(NumDeadInst , "Number of dead inst eliminated");
77 STATISTIC(NumSunkInst , "Number of instructions sunk");
78 STATISTIC(NumExpand,    "Number of expansions");
79 STATISTIC(NumFactor   , "Number of factorizations");
80 STATISTIC(NumReassoc  , "Number of reassociations");
81 
82 static cl::opt<bool>
83 EnableExpensiveCombines("expensive-combines",
84                         cl::desc("Enable expensive instruction combines"));
85 
86 static cl::opt<unsigned>
87 MaxArraySize("instcombine-maxarray-size", cl::init(1024),
88              cl::desc("Maximum array size considered when doing a combine"));
89 
90 Value *InstCombiner::EmitGEPOffset(User *GEP) {
91   return llvm::EmitGEPOffset(&Builder, DL, GEP);
92 }
93 
94 /// Return true if it is desirable to convert an integer computation from a
95 /// given bit width to a new bit width.
96 /// We don't want to convert from a legal to an illegal type or from a smaller
97 /// to a larger illegal type. A width of '1' is always treated as a legal type
98 /// because i1 is a fundamental type in IR, and there are many specialized
99 /// optimizations for i1 types.
100 bool InstCombiner::shouldChangeType(unsigned FromWidth,
101                                     unsigned ToWidth) const {
102   bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
103   bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
104 
105   // If this is a legal integer from type, and the result would be an illegal
106   // type, don't do the transformation.
107   if (FromLegal && !ToLegal)
108     return false;
109 
110   // Otherwise, if both are illegal, do not increase the size of the result. We
111   // do allow things like i160 -> i64, but not i64 -> i160.
112   if (!FromLegal && !ToLegal && ToWidth > FromWidth)
113     return false;
114 
115   return true;
116 }
117 
118 /// Return true if it is desirable to convert a computation from 'From' to 'To'.
119 /// We don't want to convert from a legal to an illegal type or from a smaller
120 /// to a larger illegal type. i1 is always treated as a legal type because it is
121 /// a fundamental type in IR, and there are many specialized optimizations for
122 /// i1 types.
123 bool InstCombiner::shouldChangeType(Type *From, Type *To) const {
124   assert(From->isIntegerTy() && To->isIntegerTy());
125 
126   unsigned FromWidth = From->getPrimitiveSizeInBits();
127   unsigned ToWidth = To->getPrimitiveSizeInBits();
128   return shouldChangeType(FromWidth, ToWidth);
129 }
130 
131 // Return true, if No Signed Wrap should be maintained for I.
132 // The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
133 // where both B and C should be ConstantInts, results in a constant that does
134 // not overflow. This function only handles the Add and Sub opcodes. For
135 // all other opcodes, the function conservatively returns false.
136 static bool MaintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
137   OverflowingBinaryOperator *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
138   if (!OBO || !OBO->hasNoSignedWrap())
139     return false;
140 
141   // We reason about Add and Sub Only.
142   Instruction::BinaryOps Opcode = I.getOpcode();
143   if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
144     return false;
145 
146   const APInt *BVal, *CVal;
147   if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
148     return false;
149 
150   bool Overflow = false;
151   if (Opcode == Instruction::Add)
152     (void)BVal->sadd_ov(*CVal, Overflow);
153   else
154     (void)BVal->ssub_ov(*CVal, Overflow);
155 
156   return !Overflow;
157 }
158 
159 /// Conservatively clears subclassOptionalData after a reassociation or
160 /// commutation. We preserve fast-math flags when applicable as they can be
161 /// preserved.
162 static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
163   FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
164   if (!FPMO) {
165     I.clearSubclassOptionalData();
166     return;
167   }
168 
169   FastMathFlags FMF = I.getFastMathFlags();
170   I.clearSubclassOptionalData();
171   I.setFastMathFlags(FMF);
172 }
173 
174 /// Combine constant operands of associative operations either before or after a
175 /// cast to eliminate one of the associative operations:
176 /// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
177 /// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
178 static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1) {
179   auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
180   if (!Cast || !Cast->hasOneUse())
181     return false;
182 
183   // TODO: Enhance logic for other casts and remove this check.
184   auto CastOpcode = Cast->getOpcode();
185   if (CastOpcode != Instruction::ZExt)
186     return false;
187 
188   // TODO: Enhance logic for other BinOps and remove this check.
189   if (!BinOp1->isBitwiseLogicOp())
190     return false;
191 
192   auto AssocOpcode = BinOp1->getOpcode();
193   auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
194   if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
195     return false;
196 
197   Constant *C1, *C2;
198   if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
199       !match(BinOp2->getOperand(1), m_Constant(C2)))
200     return false;
201 
202   // TODO: This assumes a zext cast.
203   // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
204   // to the destination type might lose bits.
205 
206   // Fold the constants together in the destination type:
207   // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
208   Type *DestTy = C1->getType();
209   Constant *CastC2 = ConstantExpr::getCast(CastOpcode, C2, DestTy);
210   Constant *FoldedC = ConstantExpr::get(AssocOpcode, C1, CastC2);
211   Cast->setOperand(0, BinOp2->getOperand(0));
212   BinOp1->setOperand(1, FoldedC);
213   return true;
214 }
215 
216 /// This performs a few simplifications for operators that are associative or
217 /// commutative:
218 ///
219 ///  Commutative operators:
220 ///
221 ///  1. Order operands such that they are listed from right (least complex) to
222 ///     left (most complex).  This puts constants before unary operators before
223 ///     binary operators.
224 ///
225 ///  Associative operators:
226 ///
227 ///  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
228 ///  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
229 ///
230 ///  Associative and commutative operators:
231 ///
232 ///  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
233 ///  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
234 ///  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
235 ///     if C1 and C2 are constants.
236 bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
237   Instruction::BinaryOps Opcode = I.getOpcode();
238   bool Changed = false;
239 
240   do {
241     // Order operands such that they are listed from right (least complex) to
242     // left (most complex).  This puts constants before unary operators before
243     // binary operators.
244     if (I.isCommutative() && getComplexity(I.getOperand(0)) <
245         getComplexity(I.getOperand(1)))
246       Changed = !I.swapOperands();
247 
248     BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
249     BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
250 
251     if (I.isAssociative()) {
252       // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
253       if (Op0 && Op0->getOpcode() == Opcode) {
254         Value *A = Op0->getOperand(0);
255         Value *B = Op0->getOperand(1);
256         Value *C = I.getOperand(1);
257 
258         // Does "B op C" simplify?
259         if (Value *V = SimplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
260           // It simplifies to V.  Form "A op V".
261           I.setOperand(0, A);
262           I.setOperand(1, V);
263           // Conservatively clear the optional flags, since they may not be
264           // preserved by the reassociation.
265           if (MaintainNoSignedWrap(I, B, C) &&
266               (!Op0 || (isa<BinaryOperator>(Op0) && Op0->hasNoSignedWrap()))) {
267             // Note: this is only valid because SimplifyBinOp doesn't look at
268             // the operands to Op0.
269             I.clearSubclassOptionalData();
270             I.setHasNoSignedWrap(true);
271           } else {
272             ClearSubclassDataAfterReassociation(I);
273           }
274 
275           Changed = true;
276           ++NumReassoc;
277           continue;
278         }
279       }
280 
281       // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
282       if (Op1 && Op1->getOpcode() == Opcode) {
283         Value *A = I.getOperand(0);
284         Value *B = Op1->getOperand(0);
285         Value *C = Op1->getOperand(1);
286 
287         // Does "A op B" simplify?
288         if (Value *V = SimplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
289           // It simplifies to V.  Form "V op C".
290           I.setOperand(0, V);
291           I.setOperand(1, C);
292           // Conservatively clear the optional flags, since they may not be
293           // preserved by the reassociation.
294           ClearSubclassDataAfterReassociation(I);
295           Changed = true;
296           ++NumReassoc;
297           continue;
298         }
299       }
300     }
301 
302     if (I.isAssociative() && I.isCommutative()) {
303       if (simplifyAssocCastAssoc(&I)) {
304         Changed = true;
305         ++NumReassoc;
306         continue;
307       }
308 
309       // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
310       if (Op0 && Op0->getOpcode() == Opcode) {
311         Value *A = Op0->getOperand(0);
312         Value *B = Op0->getOperand(1);
313         Value *C = I.getOperand(1);
314 
315         // Does "C op A" simplify?
316         if (Value *V = SimplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
317           // It simplifies to V.  Form "V op B".
318           I.setOperand(0, V);
319           I.setOperand(1, B);
320           // Conservatively clear the optional flags, since they may not be
321           // preserved by the reassociation.
322           ClearSubclassDataAfterReassociation(I);
323           Changed = true;
324           ++NumReassoc;
325           continue;
326         }
327       }
328 
329       // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
330       if (Op1 && Op1->getOpcode() == Opcode) {
331         Value *A = I.getOperand(0);
332         Value *B = Op1->getOperand(0);
333         Value *C = Op1->getOperand(1);
334 
335         // Does "C op A" simplify?
336         if (Value *V = SimplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
337           // It simplifies to V.  Form "B op V".
338           I.setOperand(0, B);
339           I.setOperand(1, V);
340           // Conservatively clear the optional flags, since they may not be
341           // preserved by the reassociation.
342           ClearSubclassDataAfterReassociation(I);
343           Changed = true;
344           ++NumReassoc;
345           continue;
346         }
347       }
348 
349       // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
350       // if C1 and C2 are constants.
351       if (Op0 && Op1 &&
352           Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
353           isa<Constant>(Op0->getOperand(1)) &&
354           isa<Constant>(Op1->getOperand(1)) &&
355           Op0->hasOneUse() && Op1->hasOneUse()) {
356         Value *A = Op0->getOperand(0);
357         Constant *C1 = cast<Constant>(Op0->getOperand(1));
358         Value *B = Op1->getOperand(0);
359         Constant *C2 = cast<Constant>(Op1->getOperand(1));
360 
361         Constant *Folded = ConstantExpr::get(Opcode, C1, C2);
362         BinaryOperator *New = BinaryOperator::Create(Opcode, A, B);
363         if (isa<FPMathOperator>(New)) {
364           FastMathFlags Flags = I.getFastMathFlags();
365           Flags &= Op0->getFastMathFlags();
366           Flags &= Op1->getFastMathFlags();
367           New->setFastMathFlags(Flags);
368         }
369         InsertNewInstWith(New, I);
370         New->takeName(Op1);
371         I.setOperand(0, New);
372         I.setOperand(1, Folded);
373         // Conservatively clear the optional flags, since they may not be
374         // preserved by the reassociation.
375         ClearSubclassDataAfterReassociation(I);
376 
377         Changed = true;
378         continue;
379       }
380     }
381 
382     // No further simplifications.
383     return Changed;
384   } while (1);
385 }
386 
387 /// Return whether "X LOp (Y ROp Z)" is always equal to
388 /// "(X LOp Y) ROp (X LOp Z)".
389 static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,
390                                      Instruction::BinaryOps ROp) {
391   switch (LOp) {
392   default:
393     return false;
394 
395   case Instruction::And:
396     // And distributes over Or and Xor.
397     switch (ROp) {
398     default:
399       return false;
400     case Instruction::Or:
401     case Instruction::Xor:
402       return true;
403     }
404 
405   case Instruction::Mul:
406     // Multiplication distributes over addition and subtraction.
407     switch (ROp) {
408     default:
409       return false;
410     case Instruction::Add:
411     case Instruction::Sub:
412       return true;
413     }
414 
415   case Instruction::Or:
416     // Or distributes over And.
417     switch (ROp) {
418     default:
419       return false;
420     case Instruction::And:
421       return true;
422     }
423   }
424 }
425 
426 /// Return whether "(X LOp Y) ROp Z" is always equal to
427 /// "(X ROp Z) LOp (Y ROp Z)".
428 static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
429                                      Instruction::BinaryOps ROp) {
430   if (Instruction::isCommutative(ROp))
431     return LeftDistributesOverRight(ROp, LOp);
432 
433   switch (LOp) {
434   default:
435     return false;
436   // (X >> Z) & (Y >> Z)  -> (X&Y) >> Z  for all shifts.
437   // (X >> Z) | (Y >> Z)  -> (X|Y) >> Z  for all shifts.
438   // (X >> Z) ^ (Y >> Z)  -> (X^Y) >> Z  for all shifts.
439   case Instruction::And:
440   case Instruction::Or:
441   case Instruction::Xor:
442     switch (ROp) {
443     default:
444       return false;
445     case Instruction::Shl:
446     case Instruction::LShr:
447     case Instruction::AShr:
448       return true;
449     }
450   }
451   // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
452   // but this requires knowing that the addition does not overflow and other
453   // such subtleties.
454   return false;
455 }
456 
457 /// This function returns identity value for given opcode, which can be used to
458 /// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
459 static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
460   if (isa<Constant>(V))
461     return nullptr;
462 
463   return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
464 }
465 
466 /// This function factors binary ops which can be combined using distributive
467 /// laws. This function tries to transform 'Op' based TopLevelOpcode to enable
468 /// factorization e.g for ADD(SHL(X , 2), MUL(X, 5)), When this function called
469 /// with TopLevelOpcode == Instruction::Add and Op = SHL(X, 2), transforms
470 /// SHL(X, 2) to MUL(X, 4) i.e. returns Instruction::Mul with LHS set to 'X' and
471 /// RHS to 4.
472 static Instruction::BinaryOps
473 getBinOpsForFactorization(Instruction::BinaryOps TopLevelOpcode,
474                           BinaryOperator *Op, Value *&LHS, Value *&RHS) {
475   assert(Op && "Expected a binary operator");
476 
477   LHS = Op->getOperand(0);
478   RHS = Op->getOperand(1);
479 
480   switch (TopLevelOpcode) {
481   default:
482     return Op->getOpcode();
483 
484   case Instruction::Add:
485   case Instruction::Sub:
486     if (Op->getOpcode() == Instruction::Shl) {
487       if (Constant *CST = dyn_cast<Constant>(Op->getOperand(1))) {
488         // The multiplier is really 1 << CST.
489         RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), CST);
490         return Instruction::Mul;
491       }
492     }
493     return Op->getOpcode();
494   }
495 
496   // TODO: We can add other conversions e.g. shr => div etc.
497 }
498 
499 /// This tries to simplify binary operations by factorizing out common terms
500 /// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
501 Value *InstCombiner::tryFactorization(BinaryOperator &I,
502                                       Instruction::BinaryOps InnerOpcode,
503                                       Value *A, Value *B, Value *C, Value *D) {
504   assert(A && B && C && D && "All values must be provided");
505 
506   Value *V = nullptr;
507   Value *SimplifiedInst = nullptr;
508   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
509   Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
510 
511   // Does "X op' Y" always equal "Y op' X"?
512   bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
513 
514   // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
515   if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
516     // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
517     // commutative case, "(A op' B) op (C op' A)"?
518     if (A == C || (InnerCommutative && A == D)) {
519       if (A != C)
520         std::swap(C, D);
521       // Consider forming "A op' (B op D)".
522       // If "B op D" simplifies then it can be formed with no cost.
523       V = SimplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
524       // If "B op D" doesn't simplify then only go on if both of the existing
525       // operations "A op' B" and "C op' D" will be zapped as no longer used.
526       if (!V && LHS->hasOneUse() && RHS->hasOneUse())
527         V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
528       if (V) {
529         SimplifiedInst = Builder.CreateBinOp(InnerOpcode, A, V);
530       }
531     }
532 
533   // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
534   if (!SimplifiedInst && RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
535     // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
536     // commutative case, "(A op' B) op (B op' D)"?
537     if (B == D || (InnerCommutative && B == C)) {
538       if (B != D)
539         std::swap(C, D);
540       // Consider forming "(A op C) op' B".
541       // If "A op C" simplifies then it can be formed with no cost.
542       V = SimplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
543 
544       // If "A op C" doesn't simplify then only go on if both of the existing
545       // operations "A op' B" and "C op' D" will be zapped as no longer used.
546       if (!V && LHS->hasOneUse() && RHS->hasOneUse())
547         V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
548       if (V) {
549         SimplifiedInst = Builder.CreateBinOp(InnerOpcode, V, B);
550       }
551     }
552 
553   if (SimplifiedInst) {
554     ++NumFactor;
555     SimplifiedInst->takeName(&I);
556 
557     // Check if we can add NSW flag to SimplifiedInst. If so, set NSW flag.
558     // TODO: Check for NUW.
559     if (BinaryOperator *BO = dyn_cast<BinaryOperator>(SimplifiedInst)) {
560       if (isa<OverflowingBinaryOperator>(SimplifiedInst)) {
561         bool HasNSW = false;
562         if (isa<OverflowingBinaryOperator>(&I))
563           HasNSW = I.hasNoSignedWrap();
564 
565         if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS))
566           HasNSW &= LOBO->hasNoSignedWrap();
567 
568         if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS))
569           HasNSW &= ROBO->hasNoSignedWrap();
570 
571         // We can propagate 'nsw' if we know that
572         //  %Y = mul nsw i16 %X, C
573         //  %Z = add nsw i16 %Y, %X
574         // =>
575         //  %Z = mul nsw i16 %X, C+1
576         //
577         // iff C+1 isn't INT_MIN
578         const APInt *CInt;
579         if (TopLevelOpcode == Instruction::Add &&
580             InnerOpcode == Instruction::Mul)
581           if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
582             BO->setHasNoSignedWrap(HasNSW);
583       }
584     }
585   }
586   return SimplifiedInst;
587 }
588 
589 /// This tries to simplify binary operations which some other binary operation
590 /// distributes over either by factorizing out common terms
591 /// (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
592 /// simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win).
593 /// Returns the simplified value, or null if it didn't simplify.
594 Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
595   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
596   BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
597   BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
598   Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
599 
600   {
601     // Factorization.
602     Value *A, *B, *C, *D;
603     Instruction::BinaryOps LHSOpcode, RHSOpcode;
604     if (Op0)
605       LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B);
606     if (Op1)
607       RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D);
608 
609     // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
610     // a common term.
611     if (Op0 && Op1 && LHSOpcode == RHSOpcode)
612       if (Value *V = tryFactorization(I, LHSOpcode, A, B, C, D))
613         return V;
614 
615     // The instruction has the form "(A op' B) op (C)".  Try to factorize common
616     // term.
617     if (Op0)
618       if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
619         if (Value *V =
620                 tryFactorization(I, LHSOpcode, A, B, RHS, Ident))
621           return V;
622 
623     // The instruction has the form "(B) op (C op' D)".  Try to factorize common
624     // term.
625     if (Op1)
626       if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
627         if (Value *V =
628                 tryFactorization(I, RHSOpcode, LHS, Ident, C, D))
629           return V;
630   }
631 
632   // Expansion.
633   if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
634     // The instruction has the form "(A op' B) op C".  See if expanding it out
635     // to "(A op C) op' (B op C)" results in simplifications.
636     Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
637     Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
638 
639     Value *L = SimplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
640     Value *R = SimplifyBinOp(TopLevelOpcode, B, C, SQ.getWithInstruction(&I));
641 
642     // Do "A op C" and "B op C" both simplify?
643     if (L && R) {
644       // They do! Return "L op' R".
645       ++NumExpand;
646       C = Builder.CreateBinOp(InnerOpcode, L, R);
647       C->takeName(&I);
648       return C;
649     }
650 
651     // Does "A op C" simplify to the identity value for the inner opcode?
652     if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
653       // They do! Return "B op C".
654       ++NumExpand;
655       C = Builder.CreateBinOp(TopLevelOpcode, B, C);
656       C->takeName(&I);
657       return C;
658     }
659 
660     // Does "B op C" simplify to the identity value for the inner opcode?
661     if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
662       // They do! Return "A op C".
663       ++NumExpand;
664       C = Builder.CreateBinOp(TopLevelOpcode, A, C);
665       C->takeName(&I);
666       return C;
667     }
668   }
669 
670   if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
671     // The instruction has the form "A op (B op' C)".  See if expanding it out
672     // to "(A op B) op' (A op C)" results in simplifications.
673     Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
674     Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
675 
676     Value *L = SimplifyBinOp(TopLevelOpcode, A, B, SQ.getWithInstruction(&I));
677     Value *R = SimplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
678 
679     // Do "A op B" and "A op C" both simplify?
680     if (L && R) {
681       // They do! Return "L op' R".
682       ++NumExpand;
683       A = Builder.CreateBinOp(InnerOpcode, L, R);
684       A->takeName(&I);
685       return A;
686     }
687 
688     // Does "A op B" simplify to the identity value for the inner opcode?
689     if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
690       // They do! Return "A op C".
691       ++NumExpand;
692       A = Builder.CreateBinOp(TopLevelOpcode, A, C);
693       A->takeName(&I);
694       return A;
695     }
696 
697     // Does "A op C" simplify to the identity value for the inner opcode?
698     if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
699       // They do! Return "A op B".
700       ++NumExpand;
701       A = Builder.CreateBinOp(TopLevelOpcode, A, B);
702       A->takeName(&I);
703       return A;
704     }
705   }
706 
707   // (op (select (a, c, b)), (select (a, d, b))) -> (select (a, (op c, d), 0))
708   // (op (select (a, b, c)), (select (a, b, d))) -> (select (a, 0, (op c, d)))
709   if (auto *SI0 = dyn_cast<SelectInst>(LHS)) {
710     if (auto *SI1 = dyn_cast<SelectInst>(RHS)) {
711       if (SI0->getCondition() == SI1->getCondition()) {
712         Value *SI = nullptr;
713         if (Value *V =
714                 SimplifyBinOp(TopLevelOpcode, SI0->getFalseValue(),
715                               SI1->getFalseValue(), SQ.getWithInstruction(&I)))
716           SI = Builder.CreateSelect(SI0->getCondition(),
717                                     Builder.CreateBinOp(TopLevelOpcode,
718                                                         SI0->getTrueValue(),
719                                                         SI1->getTrueValue()),
720                                     V);
721         if (Value *V =
722                 SimplifyBinOp(TopLevelOpcode, SI0->getTrueValue(),
723                               SI1->getTrueValue(), SQ.getWithInstruction(&I)))
724           SI = Builder.CreateSelect(
725               SI0->getCondition(), V,
726               Builder.CreateBinOp(TopLevelOpcode, SI0->getFalseValue(),
727                                   SI1->getFalseValue()));
728         if (SI) {
729           SI->takeName(&I);
730           return SI;
731         }
732       }
733     }
734   }
735 
736   return nullptr;
737 }
738 
739 /// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
740 /// constant zero (which is the 'negate' form).
741 Value *InstCombiner::dyn_castNegVal(Value *V) const {
742   if (BinaryOperator::isNeg(V))
743     return BinaryOperator::getNegArgument(V);
744 
745   // Constants can be considered to be negated values if they can be folded.
746   if (ConstantInt *C = dyn_cast<ConstantInt>(V))
747     return ConstantExpr::getNeg(C);
748 
749   if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
750     if (C->getType()->getElementType()->isIntegerTy())
751       return ConstantExpr::getNeg(C);
752 
753   if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
754     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
755       Constant *Elt = CV->getAggregateElement(i);
756       if (!Elt)
757         return nullptr;
758 
759       if (isa<UndefValue>(Elt))
760         continue;
761 
762       if (!isa<ConstantInt>(Elt))
763         return nullptr;
764     }
765     return ConstantExpr::getNeg(CV);
766   }
767 
768   return nullptr;
769 }
770 
771 /// Given a 'fsub' instruction, return the RHS of the instruction if the LHS is
772 /// a constant negative zero (which is the 'negate' form).
773 Value *InstCombiner::dyn_castFNegVal(Value *V, bool IgnoreZeroSign) const {
774   if (BinaryOperator::isFNeg(V, IgnoreZeroSign))
775     return BinaryOperator::getFNegArgument(V);
776 
777   // Constants can be considered to be negated values if they can be folded.
778   if (ConstantFP *C = dyn_cast<ConstantFP>(V))
779     return ConstantExpr::getFNeg(C);
780 
781   if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
782     if (C->getType()->getElementType()->isFloatingPointTy())
783       return ConstantExpr::getFNeg(C);
784 
785   return nullptr;
786 }
787 
788 static Value *foldOperationIntoSelectOperand(Instruction &I, Value *SO,
789                                              InstCombiner::BuilderTy &Builder) {
790   if (auto *Cast = dyn_cast<CastInst>(&I))
791     return Builder.CreateCast(Cast->getOpcode(), SO, I.getType());
792 
793   assert(I.isBinaryOp() && "Unexpected opcode for select folding");
794 
795   // Figure out if the constant is the left or the right argument.
796   bool ConstIsRHS = isa<Constant>(I.getOperand(1));
797   Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));
798 
799   if (auto *SOC = dyn_cast<Constant>(SO)) {
800     if (ConstIsRHS)
801       return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
802     return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
803   }
804 
805   Value *Op0 = SO, *Op1 = ConstOperand;
806   if (!ConstIsRHS)
807     std::swap(Op0, Op1);
808 
809   auto *BO = cast<BinaryOperator>(&I);
810   Value *RI = Builder.CreateBinOp(BO->getOpcode(), Op0, Op1,
811                                   SO->getName() + ".op");
812   auto *FPInst = dyn_cast<Instruction>(RI);
813   if (FPInst && isa<FPMathOperator>(FPInst))
814     FPInst->copyFastMathFlags(BO);
815   return RI;
816 }
817 
818 Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
819   // Don't modify shared select instructions.
820   if (!SI->hasOneUse())
821     return nullptr;
822 
823   Value *TV = SI->getTrueValue();
824   Value *FV = SI->getFalseValue();
825   if (!(isa<Constant>(TV) || isa<Constant>(FV)))
826     return nullptr;
827 
828   // Bool selects with constant operands can be folded to logical ops.
829   if (SI->getType()->isIntOrIntVectorTy(1))
830     return nullptr;
831 
832   // If it's a bitcast involving vectors, make sure it has the same number of
833   // elements on both sides.
834   if (auto *BC = dyn_cast<BitCastInst>(&Op)) {
835     VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
836     VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
837 
838     // Verify that either both or neither are vectors.
839     if ((SrcTy == nullptr) != (DestTy == nullptr))
840       return nullptr;
841 
842     // If vectors, verify that they have the same number of elements.
843     if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
844       return nullptr;
845   }
846 
847   // Test if a CmpInst instruction is used exclusively by a select as
848   // part of a minimum or maximum operation. If so, refrain from doing
849   // any other folding. This helps out other analyses which understand
850   // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
851   // and CodeGen. And in this case, at least one of the comparison
852   // operands has at least one user besides the compare (the select),
853   // which would often largely negate the benefit of folding anyway.
854   if (auto *CI = dyn_cast<CmpInst>(SI->getCondition())) {
855     if (CI->hasOneUse()) {
856       Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
857       if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
858           (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
859         return nullptr;
860     }
861   }
862 
863   Value *NewTV = foldOperationIntoSelectOperand(Op, TV, Builder);
864   Value *NewFV = foldOperationIntoSelectOperand(Op, FV, Builder);
865   return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
866 }
867 
868 static Value *foldOperationIntoPhiValue(BinaryOperator *I, Value *InV,
869                                         InstCombiner::BuilderTy &Builder) {
870   bool ConstIsRHS = isa<Constant>(I->getOperand(1));
871   Constant *C = cast<Constant>(I->getOperand(ConstIsRHS));
872 
873   if (auto *InC = dyn_cast<Constant>(InV)) {
874     if (ConstIsRHS)
875       return ConstantExpr::get(I->getOpcode(), InC, C);
876     return ConstantExpr::get(I->getOpcode(), C, InC);
877   }
878 
879   Value *Op0 = InV, *Op1 = C;
880   if (!ConstIsRHS)
881     std::swap(Op0, Op1);
882 
883   Value *RI = Builder.CreateBinOp(I->getOpcode(), Op0, Op1, "phitmp");
884   auto *FPInst = dyn_cast<Instruction>(RI);
885   if (FPInst && isa<FPMathOperator>(FPInst))
886     FPInst->copyFastMathFlags(I);
887   return RI;
888 }
889 
890 Instruction *InstCombiner::foldOpIntoPhi(Instruction &I, PHINode *PN) {
891   unsigned NumPHIValues = PN->getNumIncomingValues();
892   if (NumPHIValues == 0)
893     return nullptr;
894 
895   // We normally only transform phis with a single use.  However, if a PHI has
896   // multiple uses and they are all the same operation, we can fold *all* of the
897   // uses into the PHI.
898   if (!PN->hasOneUse()) {
899     // Walk the use list for the instruction, comparing them to I.
900     for (User *U : PN->users()) {
901       Instruction *UI = cast<Instruction>(U);
902       if (UI != &I && !I.isIdenticalTo(UI))
903         return nullptr;
904     }
905     // Otherwise, we can replace *all* users with the new PHI we form.
906   }
907 
908   // Check to see if all of the operands of the PHI are simple constants
909   // (constantint/constantfp/undef).  If there is one non-constant value,
910   // remember the BB it is in.  If there is more than one or if *it* is a PHI,
911   // bail out.  We don't do arbitrary constant expressions here because moving
912   // their computation can be expensive without a cost model.
913   BasicBlock *NonConstBB = nullptr;
914   for (unsigned i = 0; i != NumPHIValues; ++i) {
915     Value *InVal = PN->getIncomingValue(i);
916     if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
917       continue;
918 
919     if (isa<PHINode>(InVal)) return nullptr;  // Itself a phi.
920     if (NonConstBB) return nullptr;  // More than one non-const value.
921 
922     NonConstBB = PN->getIncomingBlock(i);
923 
924     // If the InVal is an invoke at the end of the pred block, then we can't
925     // insert a computation after it without breaking the edge.
926     if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
927       if (II->getParent() == NonConstBB)
928         return nullptr;
929 
930     // If the incoming non-constant value is in I's block, we will remove one
931     // instruction, but insert another equivalent one, leading to infinite
932     // instcombine.
933     if (isPotentiallyReachable(I.getParent(), NonConstBB, &DT, LI))
934       return nullptr;
935   }
936 
937   // If there is exactly one non-constant value, we can insert a copy of the
938   // operation in that block.  However, if this is a critical edge, we would be
939   // inserting the computation on some other paths (e.g. inside a loop).  Only
940   // do this if the pred block is unconditionally branching into the phi block.
941   if (NonConstBB != nullptr) {
942     BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
943     if (!BI || !BI->isUnconditional()) return nullptr;
944   }
945 
946   // Okay, we can do the transformation: create the new PHI node.
947   PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
948   InsertNewInstBefore(NewPN, *PN);
949   NewPN->takeName(PN);
950 
951   // If we are going to have to insert a new computation, do so right before the
952   // predecessor's terminator.
953   if (NonConstBB)
954     Builder.SetInsertPoint(NonConstBB->getTerminator());
955 
956   // Next, add all of the operands to the PHI.
957   if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
958     // We only currently try to fold the condition of a select when it is a phi,
959     // not the true/false values.
960     Value *TrueV = SI->getTrueValue();
961     Value *FalseV = SI->getFalseValue();
962     BasicBlock *PhiTransBB = PN->getParent();
963     for (unsigned i = 0; i != NumPHIValues; ++i) {
964       BasicBlock *ThisBB = PN->getIncomingBlock(i);
965       Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
966       Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
967       Value *InV = nullptr;
968       // Beware of ConstantExpr:  it may eventually evaluate to getNullValue,
969       // even if currently isNullValue gives false.
970       Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i));
971       // For vector constants, we cannot use isNullValue to fold into
972       // FalseVInPred versus TrueVInPred. When we have individual nonzero
973       // elements in the vector, we will incorrectly fold InC to
974       // `TrueVInPred`.
975       if (InC && !isa<ConstantExpr>(InC) && isa<ConstantInt>(InC))
976         InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
977       else {
978         // Generate the select in the same block as PN's current incoming block.
979         // Note: ThisBB need not be the NonConstBB because vector constants
980         // which are constants by definition are handled here.
981         // FIXME: This can lead to an increase in IR generation because we might
982         // generate selects for vector constant phi operand, that could not be
983         // folded to TrueVInPred or FalseVInPred as done for ConstantInt. For
984         // non-vector phis, this transformation was always profitable because
985         // the select would be generated exactly once in the NonConstBB.
986         Builder.SetInsertPoint(ThisBB->getTerminator());
987         InV = Builder.CreateSelect(PN->getIncomingValue(i), TrueVInPred,
988                                    FalseVInPred, "phitmp");
989       }
990       NewPN->addIncoming(InV, ThisBB);
991     }
992   } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
993     Constant *C = cast<Constant>(I.getOperand(1));
994     for (unsigned i = 0; i != NumPHIValues; ++i) {
995       Value *InV = nullptr;
996       if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
997         InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
998       else if (isa<ICmpInst>(CI))
999         InV = Builder.CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
1000                                  C, "phitmp");
1001       else
1002         InV = Builder.CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
1003                                  C, "phitmp");
1004       NewPN->addIncoming(InV, PN->getIncomingBlock(i));
1005     }
1006   } else if (auto *BO = dyn_cast<BinaryOperator>(&I)) {
1007     for (unsigned i = 0; i != NumPHIValues; ++i) {
1008       Value *InV = foldOperationIntoPhiValue(BO, PN->getIncomingValue(i),
1009                                              Builder);
1010       NewPN->addIncoming(InV, PN->getIncomingBlock(i));
1011     }
1012   } else {
1013     CastInst *CI = cast<CastInst>(&I);
1014     Type *RetTy = CI->getType();
1015     for (unsigned i = 0; i != NumPHIValues; ++i) {
1016       Value *InV;
1017       if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
1018         InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
1019       else
1020         InV = Builder.CreateCast(CI->getOpcode(), PN->getIncomingValue(i),
1021                                  I.getType(), "phitmp");
1022       NewPN->addIncoming(InV, PN->getIncomingBlock(i));
1023     }
1024   }
1025 
1026   for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
1027     Instruction *User = cast<Instruction>(*UI++);
1028     if (User == &I) continue;
1029     replaceInstUsesWith(*User, NewPN);
1030     eraseInstFromFunction(*User);
1031   }
1032   return replaceInstUsesWith(I, NewPN);
1033 }
1034 
1035 Instruction *InstCombiner::foldOpWithConstantIntoOperand(BinaryOperator &I) {
1036   assert(isa<Constant>(I.getOperand(1)) && "Unexpected operand type");
1037 
1038   if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
1039     if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
1040       return NewSel;
1041   } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
1042     if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
1043       return NewPhi;
1044   }
1045   return nullptr;
1046 }
1047 
1048 /// Given a pointer type and a constant offset, determine whether or not there
1049 /// is a sequence of GEP indices into the pointed type that will land us at the
1050 /// specified offset. If so, fill them into NewIndices and return the resultant
1051 /// element type, otherwise return null.
1052 Type *InstCombiner::FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
1053                                         SmallVectorImpl<Value *> &NewIndices) {
1054   Type *Ty = PtrTy->getElementType();
1055   if (!Ty->isSized())
1056     return nullptr;
1057 
1058   // Start with the index over the outer type.  Note that the type size
1059   // might be zero (even if the offset isn't zero) if the indexed type
1060   // is something like [0 x {int, int}]
1061   Type *IntPtrTy = DL.getIntPtrType(PtrTy);
1062   int64_t FirstIdx = 0;
1063   if (int64_t TySize = DL.getTypeAllocSize(Ty)) {
1064     FirstIdx = Offset/TySize;
1065     Offset -= FirstIdx*TySize;
1066 
1067     // Handle hosts where % returns negative instead of values [0..TySize).
1068     if (Offset < 0) {
1069       --FirstIdx;
1070       Offset += TySize;
1071       assert(Offset >= 0);
1072     }
1073     assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
1074   }
1075 
1076   NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));
1077 
1078   // Index into the types.  If we fail, set OrigBase to null.
1079   while (Offset) {
1080     // Indexing into tail padding between struct/array elements.
1081     if (uint64_t(Offset * 8) >= DL.getTypeSizeInBits(Ty))
1082       return nullptr;
1083 
1084     if (StructType *STy = dyn_cast<StructType>(Ty)) {
1085       const StructLayout *SL = DL.getStructLayout(STy);
1086       assert(Offset < (int64_t)SL->getSizeInBytes() &&
1087              "Offset must stay within the indexed type");
1088 
1089       unsigned Elt = SL->getElementContainingOffset(Offset);
1090       NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
1091                                             Elt));
1092 
1093       Offset -= SL->getElementOffset(Elt);
1094       Ty = STy->getElementType(Elt);
1095     } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
1096       uint64_t EltSize = DL.getTypeAllocSize(AT->getElementType());
1097       assert(EltSize && "Cannot index into a zero-sized array");
1098       NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
1099       Offset %= EltSize;
1100       Ty = AT->getElementType();
1101     } else {
1102       // Otherwise, we can't index into the middle of this atomic type, bail.
1103       return nullptr;
1104     }
1105   }
1106 
1107   return Ty;
1108 }
1109 
1110 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
1111   // If this GEP has only 0 indices, it is the same pointer as
1112   // Src. If Src is not a trivial GEP too, don't combine
1113   // the indices.
1114   if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
1115       !Src.hasOneUse())
1116     return false;
1117   return true;
1118 }
1119 
1120 /// Return a value X such that Val = X * Scale, or null if none.
1121 /// If the multiplication is known not to overflow, then NoSignedWrap is set.
1122 Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
1123   assert(isa<IntegerType>(Val->getType()) && "Can only descale integers!");
1124   assert(cast<IntegerType>(Val->getType())->getBitWidth() ==
1125          Scale.getBitWidth() && "Scale not compatible with value!");
1126 
1127   // If Val is zero or Scale is one then Val = Val * Scale.
1128   if (match(Val, m_Zero()) || Scale == 1) {
1129     NoSignedWrap = true;
1130     return Val;
1131   }
1132 
1133   // If Scale is zero then it does not divide Val.
1134   if (Scale.isMinValue())
1135     return nullptr;
1136 
1137   // Look through chains of multiplications, searching for a constant that is
1138   // divisible by Scale.  For example, descaling X*(Y*(Z*4)) by a factor of 4
1139   // will find the constant factor 4 and produce X*(Y*Z).  Descaling X*(Y*8) by
1140   // a factor of 4 will produce X*(Y*2).  The principle of operation is to bore
1141   // down from Val:
1142   //
1143   //     Val = M1 * X          ||   Analysis starts here and works down
1144   //      M1 = M2 * Y          ||   Doesn't descend into terms with more
1145   //      M2 =  Z * 4          \/   than one use
1146   //
1147   // Then to modify a term at the bottom:
1148   //
1149   //     Val = M1 * X
1150   //      M1 =  Z * Y          ||   Replaced M2 with Z
1151   //
1152   // Then to work back up correcting nsw flags.
1153 
1154   // Op - the term we are currently analyzing.  Starts at Val then drills down.
1155   // Replaced with its descaled value before exiting from the drill down loop.
1156   Value *Op = Val;
1157 
1158   // Parent - initially null, but after drilling down notes where Op came from.
1159   // In the example above, Parent is (Val, 0) when Op is M1, because M1 is the
1160   // 0'th operand of Val.
1161   std::pair<Instruction*, unsigned> Parent;
1162 
1163   // Set if the transform requires a descaling at deeper levels that doesn't
1164   // overflow.
1165   bool RequireNoSignedWrap = false;
1166 
1167   // Log base 2 of the scale. Negative if not a power of 2.
1168   int32_t logScale = Scale.exactLogBase2();
1169 
1170   for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down
1171 
1172     if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
1173       // If Op is a constant divisible by Scale then descale to the quotient.
1174       APInt Quotient(Scale), Remainder(Scale); // Init ensures right bitwidth.
1175       APInt::sdivrem(CI->getValue(), Scale, Quotient, Remainder);
1176       if (!Remainder.isMinValue())
1177         // Not divisible by Scale.
1178         return nullptr;
1179       // Replace with the quotient in the parent.
1180       Op = ConstantInt::get(CI->getType(), Quotient);
1181       NoSignedWrap = true;
1182       break;
1183     }
1184 
1185     if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op)) {
1186 
1187       if (BO->getOpcode() == Instruction::Mul) {
1188         // Multiplication.
1189         NoSignedWrap = BO->hasNoSignedWrap();
1190         if (RequireNoSignedWrap && !NoSignedWrap)
1191           return nullptr;
1192 
1193         // There are three cases for multiplication: multiplication by exactly
1194         // the scale, multiplication by a constant different to the scale, and
1195         // multiplication by something else.
1196         Value *LHS = BO->getOperand(0);
1197         Value *RHS = BO->getOperand(1);
1198 
1199         if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
1200           // Multiplication by a constant.
1201           if (CI->getValue() == Scale) {
1202             // Multiplication by exactly the scale, replace the multiplication
1203             // by its left-hand side in the parent.
1204             Op = LHS;
1205             break;
1206           }
1207 
1208           // Otherwise drill down into the constant.
1209           if (!Op->hasOneUse())
1210             return nullptr;
1211 
1212           Parent = std::make_pair(BO, 1);
1213           continue;
1214         }
1215 
1216         // Multiplication by something else. Drill down into the left-hand side
1217         // since that's where the reassociate pass puts the good stuff.
1218         if (!Op->hasOneUse())
1219           return nullptr;
1220 
1221         Parent = std::make_pair(BO, 0);
1222         continue;
1223       }
1224 
1225       if (logScale > 0 && BO->getOpcode() == Instruction::Shl &&
1226           isa<ConstantInt>(BO->getOperand(1))) {
1227         // Multiplication by a power of 2.
1228         NoSignedWrap = BO->hasNoSignedWrap();
1229         if (RequireNoSignedWrap && !NoSignedWrap)
1230           return nullptr;
1231 
1232         Value *LHS = BO->getOperand(0);
1233         int32_t Amt = cast<ConstantInt>(BO->getOperand(1))->
1234           getLimitedValue(Scale.getBitWidth());
1235         // Op = LHS << Amt.
1236 
1237         if (Amt == logScale) {
1238           // Multiplication by exactly the scale, replace the multiplication
1239           // by its left-hand side in the parent.
1240           Op = LHS;
1241           break;
1242         }
1243         if (Amt < logScale || !Op->hasOneUse())
1244           return nullptr;
1245 
1246         // Multiplication by more than the scale.  Reduce the multiplying amount
1247         // by the scale in the parent.
1248         Parent = std::make_pair(BO, 1);
1249         Op = ConstantInt::get(BO->getType(), Amt - logScale);
1250         break;
1251       }
1252     }
1253 
1254     if (!Op->hasOneUse())
1255       return nullptr;
1256 
1257     if (CastInst *Cast = dyn_cast<CastInst>(Op)) {
1258       if (Cast->getOpcode() == Instruction::SExt) {
1259         // Op is sign-extended from a smaller type, descale in the smaller type.
1260         unsigned SmallSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
1261         APInt SmallScale = Scale.trunc(SmallSize);
1262         // Suppose Op = sext X, and we descale X as Y * SmallScale.  We want to
1263         // descale Op as (sext Y) * Scale.  In order to have
1264         //   sext (Y * SmallScale) = (sext Y) * Scale
1265         // some conditions need to hold however: SmallScale must sign-extend to
1266         // Scale and the multiplication Y * SmallScale should not overflow.
1267         if (SmallScale.sext(Scale.getBitWidth()) != Scale)
1268           // SmallScale does not sign-extend to Scale.
1269           return nullptr;
1270         assert(SmallScale.exactLogBase2() == logScale);
1271         // Require that Y * SmallScale must not overflow.
1272         RequireNoSignedWrap = true;
1273 
1274         // Drill down through the cast.
1275         Parent = std::make_pair(Cast, 0);
1276         Scale = SmallScale;
1277         continue;
1278       }
1279 
1280       if (Cast->getOpcode() == Instruction::Trunc) {
1281         // Op is truncated from a larger type, descale in the larger type.
1282         // Suppose Op = trunc X, and we descale X as Y * sext Scale.  Then
1283         //   trunc (Y * sext Scale) = (trunc Y) * Scale
1284         // always holds.  However (trunc Y) * Scale may overflow even if
1285         // trunc (Y * sext Scale) does not, so nsw flags need to be cleared
1286         // from this point up in the expression (see later).
1287         if (RequireNoSignedWrap)
1288           return nullptr;
1289 
1290         // Drill down through the cast.
1291         unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
1292         Parent = std::make_pair(Cast, 0);
1293         Scale = Scale.sext(LargeSize);
1294         if (logScale + 1 == (int32_t)Cast->getType()->getPrimitiveSizeInBits())
1295           logScale = -1;
1296         assert(Scale.exactLogBase2() == logScale);
1297         continue;
1298       }
1299     }
1300 
1301     // Unsupported expression, bail out.
1302     return nullptr;
1303   }
1304 
1305   // If Op is zero then Val = Op * Scale.
1306   if (match(Op, m_Zero())) {
1307     NoSignedWrap = true;
1308     return Op;
1309   }
1310 
1311   // We know that we can successfully descale, so from here on we can safely
1312   // modify the IR.  Op holds the descaled version of the deepest term in the
1313   // expression.  NoSignedWrap is 'true' if multiplying Op by Scale is known
1314   // not to overflow.
1315 
1316   if (!Parent.first)
1317     // The expression only had one term.
1318     return Op;
1319 
1320   // Rewrite the parent using the descaled version of its operand.
1321   assert(Parent.first->hasOneUse() && "Drilled down when more than one use!");
1322   assert(Op != Parent.first->getOperand(Parent.second) &&
1323          "Descaling was a no-op?");
1324   Parent.first->setOperand(Parent.second, Op);
1325   Worklist.Add(Parent.first);
1326 
1327   // Now work back up the expression correcting nsw flags.  The logic is based
1328   // on the following observation: if X * Y is known not to overflow as a signed
1329   // multiplication, and Y is replaced by a value Z with smaller absolute value,
1330   // then X * Z will not overflow as a signed multiplication either.  As we work
1331   // our way up, having NoSignedWrap 'true' means that the descaled value at the
1332   // current level has strictly smaller absolute value than the original.
1333   Instruction *Ancestor = Parent.first;
1334   do {
1335     if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Ancestor)) {
1336       // If the multiplication wasn't nsw then we can't say anything about the
1337       // value of the descaled multiplication, and we have to clear nsw flags
1338       // from this point on up.
1339       bool OpNoSignedWrap = BO->hasNoSignedWrap();
1340       NoSignedWrap &= OpNoSignedWrap;
1341       if (NoSignedWrap != OpNoSignedWrap) {
1342         BO->setHasNoSignedWrap(NoSignedWrap);
1343         Worklist.Add(Ancestor);
1344       }
1345     } else if (Ancestor->getOpcode() == Instruction::Trunc) {
1346       // The fact that the descaled input to the trunc has smaller absolute
1347       // value than the original input doesn't tell us anything useful about
1348       // the absolute values of the truncations.
1349       NoSignedWrap = false;
1350     }
1351     assert((Ancestor->getOpcode() != Instruction::SExt || NoSignedWrap) &&
1352            "Failed to keep proper track of nsw flags while drilling down?");
1353 
1354     if (Ancestor == Val)
1355       // Got to the top, all done!
1356       return Val;
1357 
1358     // Move up one level in the expression.
1359     assert(Ancestor->hasOneUse() && "Drilled down when more than one use!");
1360     Ancestor = Ancestor->user_back();
1361   } while (1);
1362 }
1363 
1364 /// \brief Creates node of binary operation with the same attributes as the
1365 /// specified one but with other operands.
1366 static Value *CreateBinOpAsGiven(BinaryOperator &Inst, Value *LHS, Value *RHS,
1367                                  InstCombiner::BuilderTy &B) {
1368   Value *BO = B.CreateBinOp(Inst.getOpcode(), LHS, RHS);
1369   // If LHS and RHS are constant, BO won't be a binary operator.
1370   if (BinaryOperator *NewBO = dyn_cast<BinaryOperator>(BO))
1371     NewBO->copyIRFlags(&Inst);
1372   return BO;
1373 }
1374 
1375 /// \brief Makes transformation of binary operation specific for vector types.
1376 /// \param Inst Binary operator to transform.
1377 /// \return Pointer to node that must replace the original binary operator, or
1378 ///         null pointer if no transformation was made.
1379 Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
1380   if (!Inst.getType()->isVectorTy()) return nullptr;
1381 
1382   // It may not be safe to reorder shuffles and things like div, urem, etc.
1383   // because we may trap when executing those ops on unknown vector elements.
1384   // See PR20059.
1385   if (!isSafeToSpeculativelyExecute(&Inst))
1386     return nullptr;
1387 
1388   unsigned VWidth = cast<VectorType>(Inst.getType())->getNumElements();
1389   Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
1390   assert(cast<VectorType>(LHS->getType())->getNumElements() == VWidth);
1391   assert(cast<VectorType>(RHS->getType())->getNumElements() == VWidth);
1392 
1393   // If both arguments of the binary operation are shuffles that use the same
1394   // mask and shuffle within a single vector, move the shuffle after the binop:
1395   //   Op(shuffle(v1, m), shuffle(v2, m)) -> shuffle(Op(v1, v2), m)
1396   auto *LShuf = dyn_cast<ShuffleVectorInst>(LHS);
1397   auto *RShuf = dyn_cast<ShuffleVectorInst>(RHS);
1398   if (LShuf && RShuf && LShuf->getMask() == RShuf->getMask() &&
1399       isa<UndefValue>(LShuf->getOperand(1)) &&
1400       isa<UndefValue>(RShuf->getOperand(1)) &&
1401       LShuf->getOperand(0)->getType() == RShuf->getOperand(0)->getType()) {
1402     Value *NewBO = CreateBinOpAsGiven(Inst, LShuf->getOperand(0),
1403                                       RShuf->getOperand(0), Builder);
1404     return Builder.CreateShuffleVector(
1405         NewBO, UndefValue::get(NewBO->getType()), LShuf->getMask());
1406   }
1407 
1408   // If one argument is a shuffle within one vector, the other is a constant,
1409   // try moving the shuffle after the binary operation.
1410   ShuffleVectorInst *Shuffle = nullptr;
1411   Constant *C1 = nullptr;
1412   if (isa<ShuffleVectorInst>(LHS)) Shuffle = cast<ShuffleVectorInst>(LHS);
1413   if (isa<ShuffleVectorInst>(RHS)) Shuffle = cast<ShuffleVectorInst>(RHS);
1414   if (isa<Constant>(LHS)) C1 = cast<Constant>(LHS);
1415   if (isa<Constant>(RHS)) C1 = cast<Constant>(RHS);
1416   if (Shuffle && C1 &&
1417       (isa<ConstantVector>(C1) || isa<ConstantDataVector>(C1)) &&
1418       isa<UndefValue>(Shuffle->getOperand(1)) &&
1419       Shuffle->getType() == Shuffle->getOperand(0)->getType()) {
1420     SmallVector<int, 16> ShMask = Shuffle->getShuffleMask();
1421     // Find constant C2 that has property:
1422     //   shuffle(C2, ShMask) = C1
1423     // If such constant does not exist (example: ShMask=<0,0> and C1=<1,2>)
1424     // reorder is not possible.
1425     SmallVector<Constant*, 16> C2M(VWidth,
1426                                UndefValue::get(C1->getType()->getScalarType()));
1427     bool MayChange = true;
1428     for (unsigned I = 0; I < VWidth; ++I) {
1429       if (ShMask[I] >= 0) {
1430         assert(ShMask[I] < (int)VWidth);
1431         if (!isa<UndefValue>(C2M[ShMask[I]])) {
1432           MayChange = false;
1433           break;
1434         }
1435         C2M[ShMask[I]] = C1->getAggregateElement(I);
1436       }
1437     }
1438     if (MayChange) {
1439       Constant *C2 = ConstantVector::get(C2M);
1440       Value *NewLHS = isa<Constant>(LHS) ? C2 : Shuffle->getOperand(0);
1441       Value *NewRHS = isa<Constant>(LHS) ? Shuffle->getOperand(0) : C2;
1442       Value *NewBO = CreateBinOpAsGiven(Inst, NewLHS, NewRHS, Builder);
1443       return Builder.CreateShuffleVector(NewBO,
1444           UndefValue::get(Inst.getType()), Shuffle->getMask());
1445     }
1446   }
1447 
1448   return nullptr;
1449 }
1450 
1451 Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
1452   SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
1453 
1454   if (Value *V = SimplifyGEPInst(GEP.getSourceElementType(), Ops,
1455                                  SQ.getWithInstruction(&GEP)))
1456     return replaceInstUsesWith(GEP, V);
1457 
1458   Value *PtrOp = GEP.getOperand(0);
1459 
1460   // Eliminate unneeded casts for indices, and replace indices which displace
1461   // by multiples of a zero size type with zero.
1462   bool MadeChange = false;
1463   Type *IntPtrTy =
1464     DL.getIntPtrType(GEP.getPointerOperandType()->getScalarType());
1465 
1466   gep_type_iterator GTI = gep_type_begin(GEP);
1467   for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
1468        ++I, ++GTI) {
1469     // Skip indices into struct types.
1470     if (GTI.isStruct())
1471       continue;
1472 
1473     // Index type should have the same width as IntPtr
1474     Type *IndexTy = (*I)->getType();
1475     Type *NewIndexType = IndexTy->isVectorTy() ?
1476       VectorType::get(IntPtrTy, IndexTy->getVectorNumElements()) : IntPtrTy;
1477 
1478     // If the element type has zero size then any index over it is equivalent
1479     // to an index of zero, so replace it with zero if it is not zero already.
1480     Type *EltTy = GTI.getIndexedType();
1481     if (EltTy->isSized() && DL.getTypeAllocSize(EltTy) == 0)
1482       if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
1483         *I = Constant::getNullValue(NewIndexType);
1484         MadeChange = true;
1485       }
1486 
1487     if (IndexTy != NewIndexType) {
1488       // If we are using a wider index than needed for this platform, shrink
1489       // it to what we need.  If narrower, sign-extend it to what we need.
1490       // This explicit cast can make subsequent optimizations more obvious.
1491       *I = Builder.CreateIntCast(*I, NewIndexType, true);
1492       MadeChange = true;
1493     }
1494   }
1495   if (MadeChange)
1496     return &GEP;
1497 
1498   // Check to see if the inputs to the PHI node are getelementptr instructions.
1499   if (PHINode *PN = dyn_cast<PHINode>(PtrOp)) {
1500     GetElementPtrInst *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
1501     if (!Op1)
1502       return nullptr;
1503 
1504     // Don't fold a GEP into itself through a PHI node. This can only happen
1505     // through the back-edge of a loop. Folding a GEP into itself means that
1506     // the value of the previous iteration needs to be stored in the meantime,
1507     // thus requiring an additional register variable to be live, but not
1508     // actually achieving anything (the GEP still needs to be executed once per
1509     // loop iteration).
1510     if (Op1 == &GEP)
1511       return nullptr;
1512 
1513     int DI = -1;
1514 
1515     for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
1516       GetElementPtrInst *Op2 = dyn_cast<GetElementPtrInst>(*I);
1517       if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands())
1518         return nullptr;
1519 
1520       // As for Op1 above, don't try to fold a GEP into itself.
1521       if (Op2 == &GEP)
1522         return nullptr;
1523 
1524       // Keep track of the type as we walk the GEP.
1525       Type *CurTy = nullptr;
1526 
1527       for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
1528         if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
1529           return nullptr;
1530 
1531         if (Op1->getOperand(J) != Op2->getOperand(J)) {
1532           if (DI == -1) {
1533             // We have not seen any differences yet in the GEPs feeding the
1534             // PHI yet, so we record this one if it is allowed to be a
1535             // variable.
1536 
1537             // The first two arguments can vary for any GEP, the rest have to be
1538             // static for struct slots
1539             if (J > 1 && CurTy->isStructTy())
1540               return nullptr;
1541 
1542             DI = J;
1543           } else {
1544             // The GEP is different by more than one input. While this could be
1545             // extended to support GEPs that vary by more than one variable it
1546             // doesn't make sense since it greatly increases the complexity and
1547             // would result in an R+R+R addressing mode which no backend
1548             // directly supports and would need to be broken into several
1549             // simpler instructions anyway.
1550             return nullptr;
1551           }
1552         }
1553 
1554         // Sink down a layer of the type for the next iteration.
1555         if (J > 0) {
1556           if (J == 1) {
1557             CurTy = Op1->getSourceElementType();
1558           } else if (CompositeType *CT = dyn_cast<CompositeType>(CurTy)) {
1559             CurTy = CT->getTypeAtIndex(Op1->getOperand(J));
1560           } else {
1561             CurTy = nullptr;
1562           }
1563         }
1564       }
1565     }
1566 
1567     // If not all GEPs are identical we'll have to create a new PHI node.
1568     // Check that the old PHI node has only one use so that it will get
1569     // removed.
1570     if (DI != -1 && !PN->hasOneUse())
1571       return nullptr;
1572 
1573     GetElementPtrInst *NewGEP = cast<GetElementPtrInst>(Op1->clone());
1574     if (DI == -1) {
1575       // All the GEPs feeding the PHI are identical. Clone one down into our
1576       // BB so that it can be merged with the current GEP.
1577       GEP.getParent()->getInstList().insert(
1578           GEP.getParent()->getFirstInsertionPt(), NewGEP);
1579     } else {
1580       // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
1581       // into the current block so it can be merged, and create a new PHI to
1582       // set that index.
1583       PHINode *NewPN;
1584       {
1585         IRBuilderBase::InsertPointGuard Guard(Builder);
1586         Builder.SetInsertPoint(PN);
1587         NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
1588                                   PN->getNumOperands());
1589       }
1590 
1591       for (auto &I : PN->operands())
1592         NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
1593                            PN->getIncomingBlock(I));
1594 
1595       NewGEP->setOperand(DI, NewPN);
1596       GEP.getParent()->getInstList().insert(
1597           GEP.getParent()->getFirstInsertionPt(), NewGEP);
1598       NewGEP->setOperand(DI, NewPN);
1599     }
1600 
1601     GEP.setOperand(0, NewGEP);
1602     PtrOp = NewGEP;
1603   }
1604 
1605   // Combine Indices - If the source pointer to this getelementptr instruction
1606   // is a getelementptr instruction, combine the indices of the two
1607   // getelementptr instructions into a single instruction.
1608   //
1609   if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
1610     if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
1611       return nullptr;
1612 
1613     // Note that if our source is a gep chain itself then we wait for that
1614     // chain to be resolved before we perform this transformation.  This
1615     // avoids us creating a TON of code in some cases.
1616     if (GEPOperator *SrcGEP =
1617           dyn_cast<GEPOperator>(Src->getOperand(0)))
1618       if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
1619         return nullptr;   // Wait until our source is folded to completion.
1620 
1621     SmallVector<Value*, 8> Indices;
1622 
1623     // Find out whether the last index in the source GEP is a sequential idx.
1624     bool EndsWithSequential = false;
1625     for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
1626          I != E; ++I)
1627       EndsWithSequential = I.isSequential();
1628 
1629     // Can we combine the two pointer arithmetics offsets?
1630     if (EndsWithSequential) {
1631       // Replace: gep (gep %P, long B), long A, ...
1632       // With:    T = long A+B; gep %P, T, ...
1633       //
1634       Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
1635       Value *GO1 = GEP.getOperand(1);
1636 
1637       // If they aren't the same type, then the input hasn't been processed
1638       // by the loop above yet (which canonicalizes sequential index types to
1639       // intptr_t).  Just avoid transforming this until the input has been
1640       // normalized.
1641       if (SO1->getType() != GO1->getType())
1642         return nullptr;
1643 
1644       Value *Sum =
1645           SimplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
1646       // Only do the combine when we are sure the cost after the
1647       // merge is never more than that before the merge.
1648       if (Sum == nullptr)
1649         return nullptr;
1650 
1651       // Update the GEP in place if possible.
1652       if (Src->getNumOperands() == 2) {
1653         GEP.setOperand(0, Src->getOperand(0));
1654         GEP.setOperand(1, Sum);
1655         return &GEP;
1656       }
1657       Indices.append(Src->op_begin()+1, Src->op_end()-1);
1658       Indices.push_back(Sum);
1659       Indices.append(GEP.op_begin()+2, GEP.op_end());
1660     } else if (isa<Constant>(*GEP.idx_begin()) &&
1661                cast<Constant>(*GEP.idx_begin())->isNullValue() &&
1662                Src->getNumOperands() != 1) {
1663       // Otherwise we can do the fold if the first index of the GEP is a zero
1664       Indices.append(Src->op_begin()+1, Src->op_end());
1665       Indices.append(GEP.idx_begin()+1, GEP.idx_end());
1666     }
1667 
1668     if (!Indices.empty())
1669       return GEP.isInBounds() && Src->isInBounds()
1670                  ? GetElementPtrInst::CreateInBounds(
1671                        Src->getSourceElementType(), Src->getOperand(0), Indices,
1672                        GEP.getName())
1673                  : GetElementPtrInst::Create(Src->getSourceElementType(),
1674                                              Src->getOperand(0), Indices,
1675                                              GEP.getName());
1676   }
1677 
1678   if (GEP.getNumIndices() == 1) {
1679     unsigned AS = GEP.getPointerAddressSpace();
1680     if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
1681         DL.getPointerSizeInBits(AS)) {
1682       Type *Ty = GEP.getSourceElementType();
1683       uint64_t TyAllocSize = DL.getTypeAllocSize(Ty);
1684 
1685       bool Matched = false;
1686       uint64_t C;
1687       Value *V = nullptr;
1688       if (TyAllocSize == 1) {
1689         V = GEP.getOperand(1);
1690         Matched = true;
1691       } else if (match(GEP.getOperand(1),
1692                        m_AShr(m_Value(V), m_ConstantInt(C)))) {
1693         if (TyAllocSize == 1ULL << C)
1694           Matched = true;
1695       } else if (match(GEP.getOperand(1),
1696                        m_SDiv(m_Value(V), m_ConstantInt(C)))) {
1697         if (TyAllocSize == C)
1698           Matched = true;
1699       }
1700 
1701       if (Matched) {
1702         // Canonicalize (gep i8* X, -(ptrtoint Y))
1703         // to (inttoptr (sub (ptrtoint X), (ptrtoint Y)))
1704         // The GEP pattern is emitted by the SCEV expander for certain kinds of
1705         // pointer arithmetic.
1706         if (match(V, m_Neg(m_PtrToInt(m_Value())))) {
1707           Operator *Index = cast<Operator>(V);
1708           Value *PtrToInt = Builder.CreatePtrToInt(PtrOp, Index->getType());
1709           Value *NewSub = Builder.CreateSub(PtrToInt, Index->getOperand(1));
1710           return CastInst::Create(Instruction::IntToPtr, NewSub, GEP.getType());
1711         }
1712         // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X))
1713         // to (bitcast Y)
1714         Value *Y;
1715         if (match(V, m_Sub(m_PtrToInt(m_Value(Y)),
1716                            m_PtrToInt(m_Specific(GEP.getOperand(0)))))) {
1717           return CastInst::CreatePointerBitCastOrAddrSpaceCast(Y,
1718                                                                GEP.getType());
1719         }
1720       }
1721     }
1722   }
1723 
1724   // We do not handle pointer-vector geps here.
1725   if (GEP.getType()->isVectorTy())
1726     return nullptr;
1727 
1728   // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
1729   Value *StrippedPtr = PtrOp->stripPointerCasts();
1730   PointerType *StrippedPtrTy = cast<PointerType>(StrippedPtr->getType());
1731 
1732   if (StrippedPtr != PtrOp) {
1733     bool HasZeroPointerIndex = false;
1734     if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
1735       HasZeroPointerIndex = C->isZero();
1736 
1737     // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
1738     // into     : GEP [10 x i8]* X, i32 0, ...
1739     //
1740     // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
1741     //           into     : GEP i8* X, ...
1742     //
1743     // This occurs when the program declares an array extern like "int X[];"
1744     if (HasZeroPointerIndex) {
1745       if (ArrayType *CATy =
1746           dyn_cast<ArrayType>(GEP.getSourceElementType())) {
1747         // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
1748         if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
1749           // -> GEP i8* X, ...
1750           SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
1751           GetElementPtrInst *Res = GetElementPtrInst::Create(
1752               StrippedPtrTy->getElementType(), StrippedPtr, Idx, GEP.getName());
1753           Res->setIsInBounds(GEP.isInBounds());
1754           if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace())
1755             return Res;
1756           // Insert Res, and create an addrspacecast.
1757           // e.g.,
1758           // GEP (addrspacecast i8 addrspace(1)* X to [0 x i8]*), i32 0, ...
1759           // ->
1760           // %0 = GEP i8 addrspace(1)* X, ...
1761           // addrspacecast i8 addrspace(1)* %0 to i8*
1762           return new AddrSpaceCastInst(Builder.Insert(Res), GEP.getType());
1763         }
1764 
1765         if (ArrayType *XATy =
1766               dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
1767           // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
1768           if (CATy->getElementType() == XATy->getElementType()) {
1769             // -> GEP [10 x i8]* X, i32 0, ...
1770             // At this point, we know that the cast source type is a pointer
1771             // to an array of the same type as the destination pointer
1772             // array.  Because the array type is never stepped over (there
1773             // is a leading zero) we can fold the cast into this GEP.
1774             if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) {
1775               GEP.setOperand(0, StrippedPtr);
1776               GEP.setSourceElementType(XATy);
1777               return &GEP;
1778             }
1779             // Cannot replace the base pointer directly because StrippedPtr's
1780             // address space is different. Instead, create a new GEP followed by
1781             // an addrspacecast.
1782             // e.g.,
1783             // GEP (addrspacecast [10 x i8] addrspace(1)* X to [0 x i8]*),
1784             //   i32 0, ...
1785             // ->
1786             // %0 = GEP [10 x i8] addrspace(1)* X, ...
1787             // addrspacecast i8 addrspace(1)* %0 to i8*
1788             SmallVector<Value*, 8> Idx(GEP.idx_begin(), GEP.idx_end());
1789             Value *NewGEP = GEP.isInBounds()
1790                                 ? Builder.CreateInBoundsGEP(
1791                                       nullptr, StrippedPtr, Idx, GEP.getName())
1792                                 : Builder.CreateGEP(nullptr, StrippedPtr, Idx,
1793                                                     GEP.getName());
1794             return new AddrSpaceCastInst(NewGEP, GEP.getType());
1795           }
1796         }
1797       }
1798     } else if (GEP.getNumOperands() == 2) {
1799       // Transform things like:
1800       // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
1801       // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
1802       Type *SrcElTy = StrippedPtrTy->getElementType();
1803       Type *ResElTy = GEP.getSourceElementType();
1804       if (SrcElTy->isArrayTy() &&
1805           DL.getTypeAllocSize(SrcElTy->getArrayElementType()) ==
1806               DL.getTypeAllocSize(ResElTy)) {
1807         Type *IdxType = DL.getIntPtrType(GEP.getType());
1808         Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
1809         Value *NewGEP =
1810             GEP.isInBounds()
1811                 ? Builder.CreateInBoundsGEP(nullptr, StrippedPtr, Idx,
1812                                             GEP.getName())
1813                 : Builder.CreateGEP(nullptr, StrippedPtr, Idx, GEP.getName());
1814 
1815         // V and GEP are both pointer types --> BitCast
1816         return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
1817                                                              GEP.getType());
1818       }
1819 
1820       // Transform things like:
1821       // %V = mul i64 %N, 4
1822       // %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V
1823       // into:  %t1 = getelementptr i32* %arr, i32 %N; bitcast
1824       if (ResElTy->isSized() && SrcElTy->isSized()) {
1825         // Check that changing the type amounts to dividing the index by a scale
1826         // factor.
1827         uint64_t ResSize = DL.getTypeAllocSize(ResElTy);
1828         uint64_t SrcSize = DL.getTypeAllocSize(SrcElTy);
1829         if (ResSize && SrcSize % ResSize == 0) {
1830           Value *Idx = GEP.getOperand(1);
1831           unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
1832           uint64_t Scale = SrcSize / ResSize;
1833 
1834           // Earlier transforms ensure that the index has type IntPtrType, which
1835           // considerably simplifies the logic by eliminating implicit casts.
1836           assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) &&
1837                  "Index not cast to pointer width?");
1838 
1839           bool NSW;
1840           if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
1841             // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
1842             // If the multiplication NewIdx * Scale may overflow then the new
1843             // GEP may not be "inbounds".
1844             Value *NewGEP =
1845                 GEP.isInBounds() && NSW
1846                     ? Builder.CreateInBoundsGEP(nullptr, StrippedPtr, NewIdx,
1847                                                 GEP.getName())
1848                     : Builder.CreateGEP(nullptr, StrippedPtr, NewIdx,
1849                                         GEP.getName());
1850 
1851             // The NewGEP must be pointer typed, so must the old one -> BitCast
1852             return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
1853                                                                  GEP.getType());
1854           }
1855         }
1856       }
1857 
1858       // Similarly, transform things like:
1859       // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
1860       //   (where tmp = 8*tmp2) into:
1861       // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
1862       if (ResElTy->isSized() && SrcElTy->isSized() && SrcElTy->isArrayTy()) {
1863         // Check that changing to the array element type amounts to dividing the
1864         // index by a scale factor.
1865         uint64_t ResSize = DL.getTypeAllocSize(ResElTy);
1866         uint64_t ArrayEltSize =
1867             DL.getTypeAllocSize(SrcElTy->getArrayElementType());
1868         if (ResSize && ArrayEltSize % ResSize == 0) {
1869           Value *Idx = GEP.getOperand(1);
1870           unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
1871           uint64_t Scale = ArrayEltSize / ResSize;
1872 
1873           // Earlier transforms ensure that the index has type IntPtrType, which
1874           // considerably simplifies the logic by eliminating implicit casts.
1875           assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) &&
1876                  "Index not cast to pointer width?");
1877 
1878           bool NSW;
1879           if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
1880             // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
1881             // If the multiplication NewIdx * Scale may overflow then the new
1882             // GEP may not be "inbounds".
1883             Value *Off[2] = {
1884                 Constant::getNullValue(DL.getIntPtrType(GEP.getType())),
1885                 NewIdx};
1886 
1887             Value *NewGEP = GEP.isInBounds() && NSW
1888                                 ? Builder.CreateInBoundsGEP(
1889                                       SrcElTy, StrippedPtr, Off, GEP.getName())
1890                                 : Builder.CreateGEP(SrcElTy, StrippedPtr, Off,
1891                                                     GEP.getName());
1892             // The NewGEP must be pointer typed, so must the old one -> BitCast
1893             return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
1894                                                                  GEP.getType());
1895           }
1896         }
1897       }
1898     }
1899   }
1900 
1901   // addrspacecast between types is canonicalized as a bitcast, then an
1902   // addrspacecast. To take advantage of the below bitcast + struct GEP, look
1903   // through the addrspacecast.
1904   if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(PtrOp)) {
1905     //   X = bitcast A addrspace(1)* to B addrspace(1)*
1906     //   Y = addrspacecast A addrspace(1)* to B addrspace(2)*
1907     //   Z = gep Y, <...constant indices...>
1908     // Into an addrspacecasted GEP of the struct.
1909     if (BitCastInst *BC = dyn_cast<BitCastInst>(ASC->getOperand(0)))
1910       PtrOp = BC;
1911   }
1912 
1913   /// See if we can simplify:
1914   ///   X = bitcast A* to B*
1915   ///   Y = gep X, <...constant indices...>
1916   /// into a gep of the original struct.  This is important for SROA and alias
1917   /// analysis of unions.  If "A" is also a bitcast, wait for A/X to be merged.
1918   if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
1919     Value *Operand = BCI->getOperand(0);
1920     PointerType *OpType = cast<PointerType>(Operand->getType());
1921     unsigned OffsetBits = DL.getPointerTypeSizeInBits(GEP.getType());
1922     APInt Offset(OffsetBits, 0);
1923     if (!isa<BitCastInst>(Operand) &&
1924         GEP.accumulateConstantOffset(DL, Offset)) {
1925 
1926       // If this GEP instruction doesn't move the pointer, just replace the GEP
1927       // with a bitcast of the real input to the dest type.
1928       if (!Offset) {
1929         // If the bitcast is of an allocation, and the allocation will be
1930         // converted to match the type of the cast, don't touch this.
1931         if (isa<AllocaInst>(Operand) || isAllocationFn(Operand, &TLI)) {
1932           // See if the bitcast simplifies, if so, don't nuke this GEP yet.
1933           if (Instruction *I = visitBitCast(*BCI)) {
1934             if (I != BCI) {
1935               I->takeName(BCI);
1936               BCI->getParent()->getInstList().insert(BCI->getIterator(), I);
1937               replaceInstUsesWith(*BCI, I);
1938             }
1939             return &GEP;
1940           }
1941         }
1942 
1943         if (Operand->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
1944           return new AddrSpaceCastInst(Operand, GEP.getType());
1945         return new BitCastInst(Operand, GEP.getType());
1946       }
1947 
1948       // Otherwise, if the offset is non-zero, we need to find out if there is a
1949       // field at Offset in 'A's type.  If so, we can pull the cast through the
1950       // GEP.
1951       SmallVector<Value*, 8> NewIndices;
1952       if (FindElementAtOffset(OpType, Offset.getSExtValue(), NewIndices)) {
1953         Value *NGEP =
1954             GEP.isInBounds()
1955                 ? Builder.CreateInBoundsGEP(nullptr, Operand, NewIndices)
1956                 : Builder.CreateGEP(nullptr, Operand, NewIndices);
1957 
1958         if (NGEP->getType() == GEP.getType())
1959           return replaceInstUsesWith(GEP, NGEP);
1960         NGEP->takeName(&GEP);
1961 
1962         if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
1963           return new AddrSpaceCastInst(NGEP, GEP.getType());
1964         return new BitCastInst(NGEP, GEP.getType());
1965       }
1966     }
1967   }
1968 
1969   if (!GEP.isInBounds()) {
1970     unsigned PtrWidth =
1971         DL.getPointerSizeInBits(PtrOp->getType()->getPointerAddressSpace());
1972     APInt BasePtrOffset(PtrWidth, 0);
1973     Value *UnderlyingPtrOp =
1974             PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL,
1975                                                              BasePtrOffset);
1976     if (auto *AI = dyn_cast<AllocaInst>(UnderlyingPtrOp)) {
1977       if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
1978           BasePtrOffset.isNonNegative()) {
1979         APInt AllocSize(PtrWidth, DL.getTypeAllocSize(AI->getAllocatedType()));
1980         if (BasePtrOffset.ule(AllocSize)) {
1981           return GetElementPtrInst::CreateInBounds(
1982               PtrOp, makeArrayRef(Ops).slice(1), GEP.getName());
1983         }
1984       }
1985     }
1986   }
1987 
1988   return nullptr;
1989 }
1990 
1991 static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo *TLI,
1992                                          Instruction *AI) {
1993   if (isa<ConstantPointerNull>(V))
1994     return true;
1995   if (auto *LI = dyn_cast<LoadInst>(V))
1996     return isa<GlobalVariable>(LI->getPointerOperand());
1997   // Two distinct allocations will never be equal.
1998   // We rely on LookThroughBitCast in isAllocLikeFn being false, since looking
1999   // through bitcasts of V can cause
2000   // the result statement below to be true, even when AI and V (ex:
2001   // i8* ->i32* ->i8* of AI) are the same allocations.
2002   return isAllocLikeFn(V, TLI) && V != AI;
2003 }
2004 
2005 static bool isAllocSiteRemovable(Instruction *AI,
2006                                  SmallVectorImpl<WeakTrackingVH> &Users,
2007                                  const TargetLibraryInfo *TLI) {
2008   SmallVector<Instruction*, 4> Worklist;
2009   Worklist.push_back(AI);
2010 
2011   do {
2012     Instruction *PI = Worklist.pop_back_val();
2013     for (User *U : PI->users()) {
2014       Instruction *I = cast<Instruction>(U);
2015       switch (I->getOpcode()) {
2016       default:
2017         // Give up the moment we see something we can't handle.
2018         return false;
2019 
2020       case Instruction::AddrSpaceCast:
2021       case Instruction::BitCast:
2022       case Instruction::GetElementPtr:
2023         Users.emplace_back(I);
2024         Worklist.push_back(I);
2025         continue;
2026 
2027       case Instruction::ICmp: {
2028         ICmpInst *ICI = cast<ICmpInst>(I);
2029         // We can fold eq/ne comparisons with null to false/true, respectively.
2030         // We also fold comparisons in some conditions provided the alloc has
2031         // not escaped (see isNeverEqualToUnescapedAlloc).
2032         if (!ICI->isEquality())
2033           return false;
2034         unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
2035         if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
2036           return false;
2037         Users.emplace_back(I);
2038         continue;
2039       }
2040 
2041       case Instruction::Call:
2042         // Ignore no-op and store intrinsics.
2043         if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
2044           switch (II->getIntrinsicID()) {
2045           default:
2046             return false;
2047 
2048           case Intrinsic::memmove:
2049           case Intrinsic::memcpy:
2050           case Intrinsic::memset: {
2051             MemIntrinsic *MI = cast<MemIntrinsic>(II);
2052             if (MI->isVolatile() || MI->getRawDest() != PI)
2053               return false;
2054             LLVM_FALLTHROUGH;
2055           }
2056           case Intrinsic::dbg_declare:
2057           case Intrinsic::dbg_value:
2058           case Intrinsic::invariant_start:
2059           case Intrinsic::invariant_end:
2060           case Intrinsic::lifetime_start:
2061           case Intrinsic::lifetime_end:
2062           case Intrinsic::objectsize:
2063             Users.emplace_back(I);
2064             continue;
2065           }
2066         }
2067 
2068         if (isFreeCall(I, TLI)) {
2069           Users.emplace_back(I);
2070           continue;
2071         }
2072         return false;
2073 
2074       case Instruction::Store: {
2075         StoreInst *SI = cast<StoreInst>(I);
2076         if (SI->isVolatile() || SI->getPointerOperand() != PI)
2077           return false;
2078         Users.emplace_back(I);
2079         continue;
2080       }
2081       }
2082       llvm_unreachable("missing a return?");
2083     }
2084   } while (!Worklist.empty());
2085   return true;
2086 }
2087 
2088 Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
2089   // If we have a malloc call which is only used in any amount of comparisons
2090   // to null and free calls, delete the calls and replace the comparisons with
2091   // true or false as appropriate.
2092   SmallVector<WeakTrackingVH, 64> Users;
2093   if (isAllocSiteRemovable(&MI, Users, &TLI)) {
2094     for (unsigned i = 0, e = Users.size(); i != e; ++i) {
2095       // Lowering all @llvm.objectsize calls first because they may
2096       // use a bitcast/GEP of the alloca we are removing.
2097       if (!Users[i])
2098        continue;
2099 
2100       Instruction *I = cast<Instruction>(&*Users[i]);
2101 
2102       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
2103         if (II->getIntrinsicID() == Intrinsic::objectsize) {
2104           ConstantInt *Result = lowerObjectSizeCall(II, DL, &TLI,
2105                                                     /*MustSucceed=*/true);
2106           replaceInstUsesWith(*I, Result);
2107           eraseInstFromFunction(*I);
2108           Users[i] = nullptr; // Skip examining in the next loop.
2109         }
2110       }
2111     }
2112     for (unsigned i = 0, e = Users.size(); i != e; ++i) {
2113       if (!Users[i])
2114         continue;
2115 
2116       Instruction *I = cast<Instruction>(&*Users[i]);
2117 
2118       if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
2119         replaceInstUsesWith(*C,
2120                             ConstantInt::get(Type::getInt1Ty(C->getContext()),
2121                                              C->isFalseWhenEqual()));
2122       } else if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I) ||
2123                  isa<AddrSpaceCastInst>(I)) {
2124         replaceInstUsesWith(*I, UndefValue::get(I->getType()));
2125       }
2126       eraseInstFromFunction(*I);
2127     }
2128 
2129     if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
2130       // Replace invoke with a NOP intrinsic to maintain the original CFG
2131       Module *M = II->getModule();
2132       Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing);
2133       InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(),
2134                          None, "", II->getParent());
2135     }
2136     return eraseInstFromFunction(MI);
2137   }
2138   return nullptr;
2139 }
2140 
2141 /// \brief Move the call to free before a NULL test.
2142 ///
2143 /// Check if this free is accessed after its argument has been test
2144 /// against NULL (property 0).
2145 /// If yes, it is legal to move this call in its predecessor block.
2146 ///
2147 /// The move is performed only if the block containing the call to free
2148 /// will be removed, i.e.:
2149 /// 1. it has only one predecessor P, and P has two successors
2150 /// 2. it contains the call and an unconditional branch
2151 /// 3. its successor is the same as its predecessor's successor
2152 ///
2153 /// The profitability is out-of concern here and this function should
2154 /// be called only if the caller knows this transformation would be
2155 /// profitable (e.g., for code size).
2156 static Instruction *
2157 tryToMoveFreeBeforeNullTest(CallInst &FI) {
2158   Value *Op = FI.getArgOperand(0);
2159   BasicBlock *FreeInstrBB = FI.getParent();
2160   BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
2161 
2162   // Validate part of constraint #1: Only one predecessor
2163   // FIXME: We can extend the number of predecessor, but in that case, we
2164   //        would duplicate the call to free in each predecessor and it may
2165   //        not be profitable even for code size.
2166   if (!PredBB)
2167     return nullptr;
2168 
2169   // Validate constraint #2: Does this block contains only the call to
2170   //                         free and an unconditional branch?
2171   // FIXME: We could check if we can speculate everything in the
2172   //        predecessor block
2173   if (FreeInstrBB->size() != 2)
2174     return nullptr;
2175   BasicBlock *SuccBB;
2176   if (!match(FreeInstrBB->getTerminator(), m_UnconditionalBr(SuccBB)))
2177     return nullptr;
2178 
2179   // Validate the rest of constraint #1 by matching on the pred branch.
2180   TerminatorInst *TI = PredBB->getTerminator();
2181   BasicBlock *TrueBB, *FalseBB;
2182   ICmpInst::Predicate Pred;
2183   if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Op), m_Zero()), TrueBB, FalseBB)))
2184     return nullptr;
2185   if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
2186     return nullptr;
2187 
2188   // Validate constraint #3: Ensure the null case just falls through.
2189   if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
2190     return nullptr;
2191   assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
2192          "Broken CFG: missing edge from predecessor to successor");
2193 
2194   FI.moveBefore(TI);
2195   return &FI;
2196 }
2197 
2198 
2199 Instruction *InstCombiner::visitFree(CallInst &FI) {
2200   Value *Op = FI.getArgOperand(0);
2201 
2202   // free undef -> unreachable.
2203   if (isa<UndefValue>(Op)) {
2204     // Insert a new store to null because we cannot modify the CFG here.
2205     Builder.CreateStore(ConstantInt::getTrue(FI.getContext()),
2206                         UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
2207     return eraseInstFromFunction(FI);
2208   }
2209 
2210   // If we have 'free null' delete the instruction.  This can happen in stl code
2211   // when lots of inlining happens.
2212   if (isa<ConstantPointerNull>(Op))
2213     return eraseInstFromFunction(FI);
2214 
2215   // If we optimize for code size, try to move the call to free before the null
2216   // test so that simplify cfg can remove the empty block and dead code
2217   // elimination the branch. I.e., helps to turn something like:
2218   // if (foo) free(foo);
2219   // into
2220   // free(foo);
2221   if (MinimizeSize)
2222     if (Instruction *I = tryToMoveFreeBeforeNullTest(FI))
2223       return I;
2224 
2225   return nullptr;
2226 }
2227 
2228 Instruction *InstCombiner::visitReturnInst(ReturnInst &RI) {
2229   if (RI.getNumOperands() == 0) // ret void
2230     return nullptr;
2231 
2232   Value *ResultOp = RI.getOperand(0);
2233   Type *VTy = ResultOp->getType();
2234   if (!VTy->isIntegerTy())
2235     return nullptr;
2236 
2237   // There might be assume intrinsics dominating this return that completely
2238   // determine the value. If so, constant fold it.
2239   KnownBits Known = computeKnownBits(ResultOp, 0, &RI);
2240   if (Known.isConstant())
2241     RI.setOperand(0, Constant::getIntegerValue(VTy, Known.getConstant()));
2242 
2243   return nullptr;
2244 }
2245 
2246 Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
2247   // Change br (not X), label True, label False to: br X, label False, True
2248   Value *X = nullptr;
2249   BasicBlock *TrueDest;
2250   BasicBlock *FalseDest;
2251   if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
2252       !isa<Constant>(X)) {
2253     // Swap Destinations and condition...
2254     BI.setCondition(X);
2255     BI.swapSuccessors();
2256     return &BI;
2257   }
2258 
2259   // If the condition is irrelevant, remove the use so that other
2260   // transforms on the condition become more effective.
2261   if (BI.isConditional() &&
2262       BI.getSuccessor(0) == BI.getSuccessor(1) &&
2263       !isa<UndefValue>(BI.getCondition())) {
2264     BI.setCondition(UndefValue::get(BI.getCondition()->getType()));
2265     return &BI;
2266   }
2267 
2268   // Canonicalize, for example, icmp_ne -> icmp_eq or fcmp_one -> fcmp_oeq.
2269   CmpInst::Predicate Pred;
2270   if (match(&BI, m_Br(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), TrueDest,
2271                       FalseDest)) &&
2272       !isCanonicalPredicate(Pred)) {
2273     // Swap destinations and condition.
2274     CmpInst *Cond = cast<CmpInst>(BI.getCondition());
2275     Cond->setPredicate(CmpInst::getInversePredicate(Pred));
2276     BI.swapSuccessors();
2277     Worklist.Add(Cond);
2278     return &BI;
2279   }
2280 
2281   return nullptr;
2282 }
2283 
2284 Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
2285   Value *Cond = SI.getCondition();
2286   Value *Op0;
2287   ConstantInt *AddRHS;
2288   if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
2289     // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
2290     for (auto Case : SI.cases()) {
2291       Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
2292       assert(isa<ConstantInt>(NewCase) &&
2293              "Result of expression should be constant");
2294       Case.setValue(cast<ConstantInt>(NewCase));
2295     }
2296     SI.setCondition(Op0);
2297     return &SI;
2298   }
2299 
2300   KnownBits Known = computeKnownBits(Cond, 0, &SI);
2301   unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
2302   unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
2303 
2304   // Compute the number of leading bits we can ignore.
2305   // TODO: A better way to determine this would use ComputeNumSignBits().
2306   for (auto &C : SI.cases()) {
2307     LeadingKnownZeros = std::min(
2308         LeadingKnownZeros, C.getCaseValue()->getValue().countLeadingZeros());
2309     LeadingKnownOnes = std::min(
2310         LeadingKnownOnes, C.getCaseValue()->getValue().countLeadingOnes());
2311   }
2312 
2313   unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
2314 
2315   // Shrink the condition operand if the new type is smaller than the old type.
2316   // This may produce a non-standard type for the switch, but that's ok because
2317   // the backend should extend back to a legal type for the target.
2318   if (NewWidth > 0 && NewWidth < Known.getBitWidth()) {
2319     IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
2320     Builder.SetInsertPoint(&SI);
2321     Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
2322     SI.setCondition(NewCond);
2323 
2324     for (auto Case : SI.cases()) {
2325       APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
2326       Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
2327     }
2328     return &SI;
2329   }
2330 
2331   return nullptr;
2332 }
2333 
2334 Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
2335   Value *Agg = EV.getAggregateOperand();
2336 
2337   if (!EV.hasIndices())
2338     return replaceInstUsesWith(EV, Agg);
2339 
2340   if (Value *V = SimplifyExtractValueInst(Agg, EV.getIndices(),
2341                                           SQ.getWithInstruction(&EV)))
2342     return replaceInstUsesWith(EV, V);
2343 
2344   if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
2345     // We're extracting from an insertvalue instruction, compare the indices
2346     const unsigned *exti, *exte, *insi, *inse;
2347     for (exti = EV.idx_begin(), insi = IV->idx_begin(),
2348          exte = EV.idx_end(), inse = IV->idx_end();
2349          exti != exte && insi != inse;
2350          ++exti, ++insi) {
2351       if (*insi != *exti)
2352         // The insert and extract both reference distinctly different elements.
2353         // This means the extract is not influenced by the insert, and we can
2354         // replace the aggregate operand of the extract with the aggregate
2355         // operand of the insert. i.e., replace
2356         // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
2357         // %E = extractvalue { i32, { i32 } } %I, 0
2358         // with
2359         // %E = extractvalue { i32, { i32 } } %A, 0
2360         return ExtractValueInst::Create(IV->getAggregateOperand(),
2361                                         EV.getIndices());
2362     }
2363     if (exti == exte && insi == inse)
2364       // Both iterators are at the end: Index lists are identical. Replace
2365       // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
2366       // %C = extractvalue { i32, { i32 } } %B, 1, 0
2367       // with "i32 42"
2368       return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
2369     if (exti == exte) {
2370       // The extract list is a prefix of the insert list. i.e. replace
2371       // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
2372       // %E = extractvalue { i32, { i32 } } %I, 1
2373       // with
2374       // %X = extractvalue { i32, { i32 } } %A, 1
2375       // %E = insertvalue { i32 } %X, i32 42, 0
2376       // by switching the order of the insert and extract (though the
2377       // insertvalue should be left in, since it may have other uses).
2378       Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
2379                                                 EV.getIndices());
2380       return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
2381                                      makeArrayRef(insi, inse));
2382     }
2383     if (insi == inse)
2384       // The insert list is a prefix of the extract list
2385       // We can simply remove the common indices from the extract and make it
2386       // operate on the inserted value instead of the insertvalue result.
2387       // i.e., replace
2388       // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
2389       // %E = extractvalue { i32, { i32 } } %I, 1, 0
2390       // with
2391       // %E extractvalue { i32 } { i32 42 }, 0
2392       return ExtractValueInst::Create(IV->getInsertedValueOperand(),
2393                                       makeArrayRef(exti, exte));
2394   }
2395   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
2396     // We're extracting from an intrinsic, see if we're the only user, which
2397     // allows us to simplify multiple result intrinsics to simpler things that
2398     // just get one value.
2399     if (II->hasOneUse()) {
2400       // Check if we're grabbing the overflow bit or the result of a 'with
2401       // overflow' intrinsic.  If it's the latter we can remove the intrinsic
2402       // and replace it with a traditional binary instruction.
2403       switch (II->getIntrinsicID()) {
2404       case Intrinsic::uadd_with_overflow:
2405       case Intrinsic::sadd_with_overflow:
2406         if (*EV.idx_begin() == 0) {  // Normal result.
2407           Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
2408           replaceInstUsesWith(*II, UndefValue::get(II->getType()));
2409           eraseInstFromFunction(*II);
2410           return BinaryOperator::CreateAdd(LHS, RHS);
2411         }
2412 
2413         // If the normal result of the add is dead, and the RHS is a constant,
2414         // we can transform this into a range comparison.
2415         // overflow = uadd a, -4  -->  overflow = icmp ugt a, 3
2416         if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow)
2417           if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1)))
2418             return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0),
2419                                 ConstantExpr::getNot(CI));
2420         break;
2421       case Intrinsic::usub_with_overflow:
2422       case Intrinsic::ssub_with_overflow:
2423         if (*EV.idx_begin() == 0) {  // Normal result.
2424           Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
2425           replaceInstUsesWith(*II, UndefValue::get(II->getType()));
2426           eraseInstFromFunction(*II);
2427           return BinaryOperator::CreateSub(LHS, RHS);
2428         }
2429         break;
2430       case Intrinsic::umul_with_overflow:
2431       case Intrinsic::smul_with_overflow:
2432         if (*EV.idx_begin() == 0) {  // Normal result.
2433           Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
2434           replaceInstUsesWith(*II, UndefValue::get(II->getType()));
2435           eraseInstFromFunction(*II);
2436           return BinaryOperator::CreateMul(LHS, RHS);
2437         }
2438         break;
2439       default:
2440         break;
2441       }
2442     }
2443   }
2444   if (LoadInst *L = dyn_cast<LoadInst>(Agg))
2445     // If the (non-volatile) load only has one use, we can rewrite this to a
2446     // load from a GEP. This reduces the size of the load. If a load is used
2447     // only by extractvalue instructions then this either must have been
2448     // optimized before, or it is a struct with padding, in which case we
2449     // don't want to do the transformation as it loses padding knowledge.
2450     if (L->isSimple() && L->hasOneUse()) {
2451       // extractvalue has integer indices, getelementptr has Value*s. Convert.
2452       SmallVector<Value*, 4> Indices;
2453       // Prefix an i32 0 since we need the first element.
2454       Indices.push_back(Builder.getInt32(0));
2455       for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end();
2456             I != E; ++I)
2457         Indices.push_back(Builder.getInt32(*I));
2458 
2459       // We need to insert these at the location of the old load, not at that of
2460       // the extractvalue.
2461       Builder.SetInsertPoint(L);
2462       Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
2463                                              L->getPointerOperand(), Indices);
2464       Instruction *NL = Builder.CreateLoad(GEP);
2465       // Whatever aliasing information we had for the orignal load must also
2466       // hold for the smaller load, so propagate the annotations.
2467       AAMDNodes Nodes;
2468       L->getAAMetadata(Nodes);
2469       NL->setAAMetadata(Nodes);
2470       // Returning the load directly will cause the main loop to insert it in
2471       // the wrong spot, so use replaceInstUsesWith().
2472       return replaceInstUsesWith(EV, NL);
2473     }
2474   // We could simplify extracts from other values. Note that nested extracts may
2475   // already be simplified implicitly by the above: extract (extract (insert) )
2476   // will be translated into extract ( insert ( extract ) ) first and then just
2477   // the value inserted, if appropriate. Similarly for extracts from single-use
2478   // loads: extract (extract (load)) will be translated to extract (load (gep))
2479   // and if again single-use then via load (gep (gep)) to load (gep).
2480   // However, double extracts from e.g. function arguments or return values
2481   // aren't handled yet.
2482   return nullptr;
2483 }
2484 
2485 /// Return 'true' if the given typeinfo will match anything.
2486 static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
2487   switch (Personality) {
2488   case EHPersonality::GNU_C:
2489   case EHPersonality::GNU_C_SjLj:
2490   case EHPersonality::Rust:
2491     // The GCC C EH and Rust personality only exists to support cleanups, so
2492     // it's not clear what the semantics of catch clauses are.
2493     return false;
2494   case EHPersonality::Unknown:
2495     return false;
2496   case EHPersonality::GNU_Ada:
2497     // While __gnat_all_others_value will match any Ada exception, it doesn't
2498     // match foreign exceptions (or didn't, before gcc-4.7).
2499     return false;
2500   case EHPersonality::GNU_CXX:
2501   case EHPersonality::GNU_CXX_SjLj:
2502   case EHPersonality::GNU_ObjC:
2503   case EHPersonality::MSVC_X86SEH:
2504   case EHPersonality::MSVC_Win64SEH:
2505   case EHPersonality::MSVC_CXX:
2506   case EHPersonality::CoreCLR:
2507     return TypeInfo->isNullValue();
2508   }
2509   llvm_unreachable("invalid enum");
2510 }
2511 
2512 static bool shorter_filter(const Value *LHS, const Value *RHS) {
2513   return
2514     cast<ArrayType>(LHS->getType())->getNumElements()
2515   <
2516     cast<ArrayType>(RHS->getType())->getNumElements();
2517 }
2518 
2519 Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
2520   // The logic here should be correct for any real-world personality function.
2521   // However if that turns out not to be true, the offending logic can always
2522   // be conditioned on the personality function, like the catch-all logic is.
2523   EHPersonality Personality =
2524       classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
2525 
2526   // Simplify the list of clauses, eg by removing repeated catch clauses
2527   // (these are often created by inlining).
2528   bool MakeNewInstruction = false; // If true, recreate using the following:
2529   SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
2530   bool CleanupFlag = LI.isCleanup();   // - The new instruction is a cleanup.
2531 
2532   SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
2533   for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
2534     bool isLastClause = i + 1 == e;
2535     if (LI.isCatch(i)) {
2536       // A catch clause.
2537       Constant *CatchClause = LI.getClause(i);
2538       Constant *TypeInfo = CatchClause->stripPointerCasts();
2539 
2540       // If we already saw this clause, there is no point in having a second
2541       // copy of it.
2542       if (AlreadyCaught.insert(TypeInfo).second) {
2543         // This catch clause was not already seen.
2544         NewClauses.push_back(CatchClause);
2545       } else {
2546         // Repeated catch clause - drop the redundant copy.
2547         MakeNewInstruction = true;
2548       }
2549 
2550       // If this is a catch-all then there is no point in keeping any following
2551       // clauses or marking the landingpad as having a cleanup.
2552       if (isCatchAll(Personality, TypeInfo)) {
2553         if (!isLastClause)
2554           MakeNewInstruction = true;
2555         CleanupFlag = false;
2556         break;
2557       }
2558     } else {
2559       // A filter clause.  If any of the filter elements were already caught
2560       // then they can be dropped from the filter.  It is tempting to try to
2561       // exploit the filter further by saying that any typeinfo that does not
2562       // occur in the filter can't be caught later (and thus can be dropped).
2563       // However this would be wrong, since typeinfos can match without being
2564       // equal (for example if one represents a C++ class, and the other some
2565       // class derived from it).
2566       assert(LI.isFilter(i) && "Unsupported landingpad clause!");
2567       Constant *FilterClause = LI.getClause(i);
2568       ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
2569       unsigned NumTypeInfos = FilterType->getNumElements();
2570 
2571       // An empty filter catches everything, so there is no point in keeping any
2572       // following clauses or marking the landingpad as having a cleanup.  By
2573       // dealing with this case here the following code is made a bit simpler.
2574       if (!NumTypeInfos) {
2575         NewClauses.push_back(FilterClause);
2576         if (!isLastClause)
2577           MakeNewInstruction = true;
2578         CleanupFlag = false;
2579         break;
2580       }
2581 
2582       bool MakeNewFilter = false; // If true, make a new filter.
2583       SmallVector<Constant *, 16> NewFilterElts; // New elements.
2584       if (isa<ConstantAggregateZero>(FilterClause)) {
2585         // Not an empty filter - it contains at least one null typeinfo.
2586         assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
2587         Constant *TypeInfo =
2588           Constant::getNullValue(FilterType->getElementType());
2589         // If this typeinfo is a catch-all then the filter can never match.
2590         if (isCatchAll(Personality, TypeInfo)) {
2591           // Throw the filter away.
2592           MakeNewInstruction = true;
2593           continue;
2594         }
2595 
2596         // There is no point in having multiple copies of this typeinfo, so
2597         // discard all but the first copy if there is more than one.
2598         NewFilterElts.push_back(TypeInfo);
2599         if (NumTypeInfos > 1)
2600           MakeNewFilter = true;
2601       } else {
2602         ConstantArray *Filter = cast<ConstantArray>(FilterClause);
2603         SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
2604         NewFilterElts.reserve(NumTypeInfos);
2605 
2606         // Remove any filter elements that were already caught or that already
2607         // occurred in the filter.  While there, see if any of the elements are
2608         // catch-alls.  If so, the filter can be discarded.
2609         bool SawCatchAll = false;
2610         for (unsigned j = 0; j != NumTypeInfos; ++j) {
2611           Constant *Elt = Filter->getOperand(j);
2612           Constant *TypeInfo = Elt->stripPointerCasts();
2613           if (isCatchAll(Personality, TypeInfo)) {
2614             // This element is a catch-all.  Bail out, noting this fact.
2615             SawCatchAll = true;
2616             break;
2617           }
2618 
2619           // Even if we've seen a type in a catch clause, we don't want to
2620           // remove it from the filter.  An unexpected type handler may be
2621           // set up for a call site which throws an exception of the same
2622           // type caught.  In order for the exception thrown by the unexpected
2623           // handler to propagate correctly, the filter must be correctly
2624           // described for the call site.
2625           //
2626           // Example:
2627           //
2628           // void unexpected() { throw 1;}
2629           // void foo() throw (int) {
2630           //   std::set_unexpected(unexpected);
2631           //   try {
2632           //     throw 2.0;
2633           //   } catch (int i) {}
2634           // }
2635 
2636           // There is no point in having multiple copies of the same typeinfo in
2637           // a filter, so only add it if we didn't already.
2638           if (SeenInFilter.insert(TypeInfo).second)
2639             NewFilterElts.push_back(cast<Constant>(Elt));
2640         }
2641         // A filter containing a catch-all cannot match anything by definition.
2642         if (SawCatchAll) {
2643           // Throw the filter away.
2644           MakeNewInstruction = true;
2645           continue;
2646         }
2647 
2648         // If we dropped something from the filter, make a new one.
2649         if (NewFilterElts.size() < NumTypeInfos)
2650           MakeNewFilter = true;
2651       }
2652       if (MakeNewFilter) {
2653         FilterType = ArrayType::get(FilterType->getElementType(),
2654                                     NewFilterElts.size());
2655         FilterClause = ConstantArray::get(FilterType, NewFilterElts);
2656         MakeNewInstruction = true;
2657       }
2658 
2659       NewClauses.push_back(FilterClause);
2660 
2661       // If the new filter is empty then it will catch everything so there is
2662       // no point in keeping any following clauses or marking the landingpad
2663       // as having a cleanup.  The case of the original filter being empty was
2664       // already handled above.
2665       if (MakeNewFilter && !NewFilterElts.size()) {
2666         assert(MakeNewInstruction && "New filter but not a new instruction!");
2667         CleanupFlag = false;
2668         break;
2669       }
2670     }
2671   }
2672 
2673   // If several filters occur in a row then reorder them so that the shortest
2674   // filters come first (those with the smallest number of elements).  This is
2675   // advantageous because shorter filters are more likely to match, speeding up
2676   // unwinding, but mostly because it increases the effectiveness of the other
2677   // filter optimizations below.
2678   for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
2679     unsigned j;
2680     // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
2681     for (j = i; j != e; ++j)
2682       if (!isa<ArrayType>(NewClauses[j]->getType()))
2683         break;
2684 
2685     // Check whether the filters are already sorted by length.  We need to know
2686     // if sorting them is actually going to do anything so that we only make a
2687     // new landingpad instruction if it does.
2688     for (unsigned k = i; k + 1 < j; ++k)
2689       if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
2690         // Not sorted, so sort the filters now.  Doing an unstable sort would be
2691         // correct too but reordering filters pointlessly might confuse users.
2692         std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
2693                          shorter_filter);
2694         MakeNewInstruction = true;
2695         break;
2696       }
2697 
2698     // Look for the next batch of filters.
2699     i = j + 1;
2700   }
2701 
2702   // If typeinfos matched if and only if equal, then the elements of a filter L
2703   // that occurs later than a filter F could be replaced by the intersection of
2704   // the elements of F and L.  In reality two typeinfos can match without being
2705   // equal (for example if one represents a C++ class, and the other some class
2706   // derived from it) so it would be wrong to perform this transform in general.
2707   // However the transform is correct and useful if F is a subset of L.  In that
2708   // case L can be replaced by F, and thus removed altogether since repeating a
2709   // filter is pointless.  So here we look at all pairs of filters F and L where
2710   // L follows F in the list of clauses, and remove L if every element of F is
2711   // an element of L.  This can occur when inlining C++ functions with exception
2712   // specifications.
2713   for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
2714     // Examine each filter in turn.
2715     Value *Filter = NewClauses[i];
2716     ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
2717     if (!FTy)
2718       // Not a filter - skip it.
2719       continue;
2720     unsigned FElts = FTy->getNumElements();
2721     // Examine each filter following this one.  Doing this backwards means that
2722     // we don't have to worry about filters disappearing under us when removed.
2723     for (unsigned j = NewClauses.size() - 1; j != i; --j) {
2724       Value *LFilter = NewClauses[j];
2725       ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
2726       if (!LTy)
2727         // Not a filter - skip it.
2728         continue;
2729       // If Filter is a subset of LFilter, i.e. every element of Filter is also
2730       // an element of LFilter, then discard LFilter.
2731       SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
2732       // If Filter is empty then it is a subset of LFilter.
2733       if (!FElts) {
2734         // Discard LFilter.
2735         NewClauses.erase(J);
2736         MakeNewInstruction = true;
2737         // Move on to the next filter.
2738         continue;
2739       }
2740       unsigned LElts = LTy->getNumElements();
2741       // If Filter is longer than LFilter then it cannot be a subset of it.
2742       if (FElts > LElts)
2743         // Move on to the next filter.
2744         continue;
2745       // At this point we know that LFilter has at least one element.
2746       if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
2747         // Filter is a subset of LFilter iff Filter contains only zeros (as we
2748         // already know that Filter is not longer than LFilter).
2749         if (isa<ConstantAggregateZero>(Filter)) {
2750           assert(FElts <= LElts && "Should have handled this case earlier!");
2751           // Discard LFilter.
2752           NewClauses.erase(J);
2753           MakeNewInstruction = true;
2754         }
2755         // Move on to the next filter.
2756         continue;
2757       }
2758       ConstantArray *LArray = cast<ConstantArray>(LFilter);
2759       if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
2760         // Since Filter is non-empty and contains only zeros, it is a subset of
2761         // LFilter iff LFilter contains a zero.
2762         assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
2763         for (unsigned l = 0; l != LElts; ++l)
2764           if (LArray->getOperand(l)->isNullValue()) {
2765             // LFilter contains a zero - discard it.
2766             NewClauses.erase(J);
2767             MakeNewInstruction = true;
2768             break;
2769           }
2770         // Move on to the next filter.
2771         continue;
2772       }
2773       // At this point we know that both filters are ConstantArrays.  Loop over
2774       // operands to see whether every element of Filter is also an element of
2775       // LFilter.  Since filters tend to be short this is probably faster than
2776       // using a method that scales nicely.
2777       ConstantArray *FArray = cast<ConstantArray>(Filter);
2778       bool AllFound = true;
2779       for (unsigned f = 0; f != FElts; ++f) {
2780         Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
2781         AllFound = false;
2782         for (unsigned l = 0; l != LElts; ++l) {
2783           Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
2784           if (LTypeInfo == FTypeInfo) {
2785             AllFound = true;
2786             break;
2787           }
2788         }
2789         if (!AllFound)
2790           break;
2791       }
2792       if (AllFound) {
2793         // Discard LFilter.
2794         NewClauses.erase(J);
2795         MakeNewInstruction = true;
2796       }
2797       // Move on to the next filter.
2798     }
2799   }
2800 
2801   // If we changed any of the clauses, replace the old landingpad instruction
2802   // with a new one.
2803   if (MakeNewInstruction) {
2804     LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
2805                                                  NewClauses.size());
2806     for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
2807       NLI->addClause(NewClauses[i]);
2808     // A landing pad with no clauses must have the cleanup flag set.  It is
2809     // theoretically possible, though highly unlikely, that we eliminated all
2810     // clauses.  If so, force the cleanup flag to true.
2811     if (NewClauses.empty())
2812       CleanupFlag = true;
2813     NLI->setCleanup(CleanupFlag);
2814     return NLI;
2815   }
2816 
2817   // Even if none of the clauses changed, we may nonetheless have understood
2818   // that the cleanup flag is pointless.  Clear it if so.
2819   if (LI.isCleanup() != CleanupFlag) {
2820     assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
2821     LI.setCleanup(CleanupFlag);
2822     return &LI;
2823   }
2824 
2825   return nullptr;
2826 }
2827 
2828 /// Try to move the specified instruction from its current block into the
2829 /// beginning of DestBlock, which can only happen if it's safe to move the
2830 /// instruction past all of the instructions between it and the end of its
2831 /// block.
2832 static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
2833   assert(I->hasOneUse() && "Invariants didn't hold!");
2834 
2835   // Cannot move control-flow-involving, volatile loads, vaarg, etc.
2836   if (isa<PHINode>(I) || I->isEHPad() || I->mayHaveSideEffects() ||
2837       isa<TerminatorInst>(I))
2838     return false;
2839 
2840   // Do not sink alloca instructions out of the entry block.
2841   if (isa<AllocaInst>(I) && I->getParent() ==
2842         &DestBlock->getParent()->getEntryBlock())
2843     return false;
2844 
2845   // Do not sink into catchswitch blocks.
2846   if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
2847     return false;
2848 
2849   // Do not sink convergent call instructions.
2850   if (auto *CI = dyn_cast<CallInst>(I)) {
2851     if (CI->isConvergent())
2852       return false;
2853   }
2854   // We can only sink load instructions if there is nothing between the load and
2855   // the end of block that could change the value.
2856   if (I->mayReadFromMemory()) {
2857     for (BasicBlock::iterator Scan = I->getIterator(),
2858                               E = I->getParent()->end();
2859          Scan != E; ++Scan)
2860       if (Scan->mayWriteToMemory())
2861         return false;
2862   }
2863 
2864   BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
2865   I->moveBefore(&*InsertPos);
2866   ++NumSunkInst;
2867   return true;
2868 }
2869 
2870 bool InstCombiner::run() {
2871   while (!Worklist.isEmpty()) {
2872     Instruction *I = Worklist.RemoveOne();
2873     if (I == nullptr) continue;  // skip null values.
2874 
2875     // Check to see if we can DCE the instruction.
2876     if (isInstructionTriviallyDead(I, &TLI)) {
2877       DEBUG(dbgs() << "IC: DCE: " << *I << '\n');
2878       eraseInstFromFunction(*I);
2879       ++NumDeadInst;
2880       MadeIRChange = true;
2881       continue;
2882     }
2883 
2884     // Instruction isn't dead, see if we can constant propagate it.
2885     if (!I->use_empty() &&
2886         (I->getNumOperands() == 0 || isa<Constant>(I->getOperand(0)))) {
2887       if (Constant *C = ConstantFoldInstruction(I, DL, &TLI)) {
2888         DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
2889 
2890         // Add operands to the worklist.
2891         replaceInstUsesWith(*I, C);
2892         ++NumConstProp;
2893         if (isInstructionTriviallyDead(I, &TLI))
2894           eraseInstFromFunction(*I);
2895         MadeIRChange = true;
2896         continue;
2897       }
2898     }
2899 
2900     // In general, it is possible for computeKnownBits to determine all bits in
2901     // a value even when the operands are not all constants.
2902     Type *Ty = I->getType();
2903     if (ExpensiveCombines && !I->use_empty() && Ty->isIntOrIntVectorTy()) {
2904       KnownBits Known = computeKnownBits(I, /*Depth*/0, I);
2905       if (Known.isConstant()) {
2906         Constant *C = ConstantInt::get(Ty, Known.getConstant());
2907         DEBUG(dbgs() << "IC: ConstFold (all bits known) to: " << *C <<
2908                         " from: " << *I << '\n');
2909 
2910         // Add operands to the worklist.
2911         replaceInstUsesWith(*I, C);
2912         ++NumConstProp;
2913         if (isInstructionTriviallyDead(I, &TLI))
2914           eraseInstFromFunction(*I);
2915         MadeIRChange = true;
2916         continue;
2917       }
2918     }
2919 
2920     // See if we can trivially sink this instruction to a successor basic block.
2921     if (I->hasOneUse()) {
2922       BasicBlock *BB = I->getParent();
2923       Instruction *UserInst = cast<Instruction>(*I->user_begin());
2924       BasicBlock *UserParent;
2925 
2926       // Get the block the use occurs in.
2927       if (PHINode *PN = dyn_cast<PHINode>(UserInst))
2928         UserParent = PN->getIncomingBlock(*I->use_begin());
2929       else
2930         UserParent = UserInst->getParent();
2931 
2932       if (UserParent != BB) {
2933         bool UserIsSuccessor = false;
2934         // See if the user is one of our successors.
2935         for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
2936           if (*SI == UserParent) {
2937             UserIsSuccessor = true;
2938             break;
2939           }
2940 
2941         // If the user is one of our immediate successors, and if that successor
2942         // only has us as a predecessors (we'd have to split the critical edge
2943         // otherwise), we can keep going.
2944         if (UserIsSuccessor && UserParent->getUniquePredecessor()) {
2945           // Okay, the CFG is simple enough, try to sink this instruction.
2946           if (TryToSinkInstruction(I, UserParent)) {
2947             DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
2948             MadeIRChange = true;
2949             // We'll add uses of the sunk instruction below, but since sinking
2950             // can expose opportunities for it's *operands* add them to the
2951             // worklist
2952             for (Use &U : I->operands())
2953               if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
2954                 Worklist.Add(OpI);
2955           }
2956         }
2957       }
2958     }
2959 
2960     // Now that we have an instruction, try combining it to simplify it.
2961     Builder.SetInsertPoint(I);
2962     Builder.SetCurrentDebugLocation(I->getDebugLoc());
2963 
2964 #ifndef NDEBUG
2965     std::string OrigI;
2966 #endif
2967     DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
2968     DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
2969 
2970     if (Instruction *Result = visit(*I)) {
2971       ++NumCombined;
2972       // Should we replace the old instruction with a new one?
2973       if (Result != I) {
2974         DEBUG(dbgs() << "IC: Old = " << *I << '\n'
2975                      << "    New = " << *Result << '\n');
2976 
2977         if (I->getDebugLoc())
2978           Result->setDebugLoc(I->getDebugLoc());
2979         // Everything uses the new instruction now.
2980         I->replaceAllUsesWith(Result);
2981 
2982         // Move the name to the new instruction first.
2983         Result->takeName(I);
2984 
2985         // Push the new instruction and any users onto the worklist.
2986         Worklist.AddUsersToWorkList(*Result);
2987         Worklist.Add(Result);
2988 
2989         // Insert the new instruction into the basic block...
2990         BasicBlock *InstParent = I->getParent();
2991         BasicBlock::iterator InsertPos = I->getIterator();
2992 
2993         // If we replace a PHI with something that isn't a PHI, fix up the
2994         // insertion point.
2995         if (!isa<PHINode>(Result) && isa<PHINode>(InsertPos))
2996           InsertPos = InstParent->getFirstInsertionPt();
2997 
2998         InstParent->getInstList().insert(InsertPos, Result);
2999 
3000         eraseInstFromFunction(*I);
3001       } else {
3002         DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
3003                      << "    New = " << *I << '\n');
3004 
3005         // If the instruction was modified, it's possible that it is now dead.
3006         // if so, remove it.
3007         if (isInstructionTriviallyDead(I, &TLI)) {
3008           eraseInstFromFunction(*I);
3009         } else {
3010           Worklist.AddUsersToWorkList(*I);
3011           Worklist.Add(I);
3012         }
3013       }
3014       MadeIRChange = true;
3015     }
3016   }
3017 
3018   Worklist.Zap();
3019   return MadeIRChange;
3020 }
3021 
3022 /// Walk the function in depth-first order, adding all reachable code to the
3023 /// worklist.
3024 ///
3025 /// This has a couple of tricks to make the code faster and more powerful.  In
3026 /// particular, we constant fold and DCE instructions as we go, to avoid adding
3027 /// them to the worklist (this significantly speeds up instcombine on code where
3028 /// many instructions are dead or constant).  Additionally, if we find a branch
3029 /// whose condition is a known constant, we only visit the reachable successors.
3030 ///
3031 static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,
3032                                        SmallPtrSetImpl<BasicBlock *> &Visited,
3033                                        InstCombineWorklist &ICWorklist,
3034                                        const TargetLibraryInfo *TLI) {
3035   bool MadeIRChange = false;
3036   SmallVector<BasicBlock*, 256> Worklist;
3037   Worklist.push_back(BB);
3038 
3039   SmallVector<Instruction*, 128> InstrsForInstCombineWorklist;
3040   DenseMap<Constant *, Constant *> FoldedConstants;
3041 
3042   do {
3043     BB = Worklist.pop_back_val();
3044 
3045     // We have now visited this block!  If we've already been here, ignore it.
3046     if (!Visited.insert(BB).second)
3047       continue;
3048 
3049     for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
3050       Instruction *Inst = &*BBI++;
3051 
3052       // DCE instruction if trivially dead.
3053       if (isInstructionTriviallyDead(Inst, TLI)) {
3054         ++NumDeadInst;
3055         DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
3056         Inst->eraseFromParent();
3057         MadeIRChange = true;
3058         continue;
3059       }
3060 
3061       // ConstantProp instruction if trivially constant.
3062       if (!Inst->use_empty() &&
3063           (Inst->getNumOperands() == 0 || isa<Constant>(Inst->getOperand(0))))
3064         if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) {
3065           DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: "
3066                        << *Inst << '\n');
3067           Inst->replaceAllUsesWith(C);
3068           ++NumConstProp;
3069           if (isInstructionTriviallyDead(Inst, TLI))
3070             Inst->eraseFromParent();
3071           MadeIRChange = true;
3072           continue;
3073         }
3074 
3075       // See if we can constant fold its operands.
3076       for (Use &U : Inst->operands()) {
3077         if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
3078           continue;
3079 
3080         auto *C = cast<Constant>(U);
3081         Constant *&FoldRes = FoldedConstants[C];
3082         if (!FoldRes)
3083           FoldRes = ConstantFoldConstant(C, DL, TLI);
3084         if (!FoldRes)
3085           FoldRes = C;
3086 
3087         if (FoldRes != C) {
3088           DEBUG(dbgs() << "IC: ConstFold operand of: " << *Inst
3089                        << "\n    Old = " << *C
3090                        << "\n    New = " << *FoldRes << '\n');
3091           U = FoldRes;
3092           MadeIRChange = true;
3093         }
3094       }
3095 
3096       // Skip processing debug intrinsics in InstCombine. Processing these call instructions
3097       // consumes non-trivial amount of time and provides no value for the optimization.
3098       if (!isa<DbgInfoIntrinsic>(Inst))
3099         InstrsForInstCombineWorklist.push_back(Inst);
3100     }
3101 
3102     // Recursively visit successors.  If this is a branch or switch on a
3103     // constant, only visit the reachable successor.
3104     TerminatorInst *TI = BB->getTerminator();
3105     if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
3106       if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
3107         bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
3108         BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
3109         Worklist.push_back(ReachableBB);
3110         continue;
3111       }
3112     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
3113       if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
3114         Worklist.push_back(SI->findCaseValue(Cond)->getCaseSuccessor());
3115         continue;
3116       }
3117     }
3118 
3119     for (BasicBlock *SuccBB : TI->successors())
3120       Worklist.push_back(SuccBB);
3121   } while (!Worklist.empty());
3122 
3123   // Once we've found all of the instructions to add to instcombine's worklist,
3124   // add them in reverse order.  This way instcombine will visit from the top
3125   // of the function down.  This jives well with the way that it adds all uses
3126   // of instructions to the worklist after doing a transformation, thus avoiding
3127   // some N^2 behavior in pathological cases.
3128   ICWorklist.AddInitialGroup(InstrsForInstCombineWorklist);
3129 
3130   return MadeIRChange;
3131 }
3132 
3133 /// \brief Populate the IC worklist from a function, and prune any dead basic
3134 /// blocks discovered in the process.
3135 ///
3136 /// This also does basic constant propagation and other forward fixing to make
3137 /// the combiner itself run much faster.
3138 static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL,
3139                                           TargetLibraryInfo *TLI,
3140                                           InstCombineWorklist &ICWorklist) {
3141   bool MadeIRChange = false;
3142 
3143   // Do a depth-first traversal of the function, populate the worklist with
3144   // the reachable instructions.  Ignore blocks that are not reachable.  Keep
3145   // track of which blocks we visit.
3146   SmallPtrSet<BasicBlock *, 32> Visited;
3147   MadeIRChange |=
3148       AddReachableCodeToWorklist(&F.front(), DL, Visited, ICWorklist, TLI);
3149 
3150   // Do a quick scan over the function.  If we find any blocks that are
3151   // unreachable, remove any instructions inside of them.  This prevents
3152   // the instcombine code from having to deal with some bad special cases.
3153   for (BasicBlock &BB : F) {
3154     if (Visited.count(&BB))
3155       continue;
3156 
3157     unsigned NumDeadInstInBB = removeAllNonTerminatorAndEHPadInstructions(&BB);
3158     MadeIRChange |= NumDeadInstInBB > 0;
3159     NumDeadInst += NumDeadInstInBB;
3160   }
3161 
3162   return MadeIRChange;
3163 }
3164 
3165 static bool
3166 combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
3167                                 AliasAnalysis *AA, AssumptionCache &AC,
3168                                 TargetLibraryInfo &TLI, DominatorTree &DT,
3169                                 bool ExpensiveCombines = true,
3170                                 LoopInfo *LI = nullptr) {
3171   auto &DL = F.getParent()->getDataLayout();
3172   ExpensiveCombines |= EnableExpensiveCombines;
3173 
3174   /// Builder - This is an IRBuilder that automatically inserts new
3175   /// instructions into the worklist when they are created.
3176   IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
3177       F.getContext(), TargetFolder(DL),
3178       IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
3179         Worklist.Add(I);
3180 
3181         using namespace llvm::PatternMatch;
3182         if (match(I, m_Intrinsic<Intrinsic::assume>()))
3183           AC.registerAssumption(cast<CallInst>(I));
3184       }));
3185 
3186   // Lower dbg.declare intrinsics otherwise their value may be clobbered
3187   // by instcombiner.
3188   bool MadeIRChange = LowerDbgDeclare(F);
3189 
3190   // Iterate while there is work to do.
3191   int Iteration = 0;
3192   for (;;) {
3193     ++Iteration;
3194     DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
3195                  << F.getName() << "\n");
3196 
3197     MadeIRChange |= prepareICWorklistFromFunction(F, DL, &TLI, Worklist);
3198 
3199     InstCombiner IC(Worklist, Builder, F.optForMinSize(), ExpensiveCombines,
3200                     AA, AC, TLI, DT, DL, LI);
3201     IC.MaxArraySizeForCombine = MaxArraySize;
3202 
3203     if (!IC.run())
3204       break;
3205   }
3206 
3207   return MadeIRChange || Iteration > 1;
3208 }
3209 
3210 PreservedAnalyses InstCombinePass::run(Function &F,
3211                                        FunctionAnalysisManager &AM) {
3212   auto &AC = AM.getResult<AssumptionAnalysis>(F);
3213   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
3214   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
3215 
3216   auto *LI = AM.getCachedResult<LoopAnalysis>(F);
3217 
3218   // FIXME: The AliasAnalysis is not yet supported in the new pass manager
3219   if (!combineInstructionsOverFunction(F, Worklist, nullptr, AC, TLI, DT,
3220                                        ExpensiveCombines, LI))
3221     // No changes, all analyses are preserved.
3222     return PreservedAnalyses::all();
3223 
3224   // Mark all the analyses that instcombine updates as preserved.
3225   PreservedAnalyses PA;
3226   PA.preserveSet<CFGAnalyses>();
3227   PA.preserve<AAManager>();
3228   PA.preserve<GlobalsAA>();
3229   return PA;
3230 }
3231 
3232 void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
3233   AU.setPreservesCFG();
3234   AU.addRequired<AAResultsWrapperPass>();
3235   AU.addRequired<AssumptionCacheTracker>();
3236   AU.addRequired<TargetLibraryInfoWrapperPass>();
3237   AU.addRequired<DominatorTreeWrapperPass>();
3238   AU.addPreserved<DominatorTreeWrapperPass>();
3239   AU.addPreserved<AAResultsWrapperPass>();
3240   AU.addPreserved<BasicAAWrapperPass>();
3241   AU.addPreserved<GlobalsAAWrapperPass>();
3242 }
3243 
3244 bool InstructionCombiningPass::runOnFunction(Function &F) {
3245   if (skipFunction(F))
3246     return false;
3247 
3248   // Required analyses.
3249   auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
3250   auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
3251   auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
3252   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
3253 
3254   // Optional analyses.
3255   auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
3256   auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
3257 
3258   return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, DT,
3259                                          ExpensiveCombines, LI);
3260 }
3261 
3262 char InstructionCombiningPass::ID = 0;
3263 INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
3264                       "Combine redundant instructions", false, false)
3265 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
3266 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
3267 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
3268 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
3269 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
3270 INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
3271                     "Combine redundant instructions", false, false)
3272 
3273 // Initialization Routines
3274 void llvm::initializeInstCombine(PassRegistry &Registry) {
3275   initializeInstructionCombiningPassPass(Registry);
3276 }
3277 
3278 void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
3279   initializeInstructionCombiningPassPass(*unwrap(R));
3280 }
3281 
3282 FunctionPass *llvm::createInstructionCombiningPass(bool ExpensiveCombines) {
3283   return new InstructionCombiningPass(ExpensiveCombines);
3284 }
3285