//===------- VectorCombine.cpp - Optimize partial vector operations -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass optimizes scalar/vector interactions using target cost models. The
// transforms implemented here may not fit in traditional loop-based or SLP
// vectorization passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/VectorCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"

#define DEBUG_TYPE "vector-combine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumVecLoad, "Number of vector loads formed");
STATISTIC(NumVecCmp, "Number of vector compares formed");
STATISTIC(NumVecBO, "Number of vector binops formed");
STATISTIC(NumVecCmpBO, "Number of vector compare + binop formed");
STATISTIC(NumShufOfBitcast, "Number of shuffles moved after bitcast");
STATISTIC(NumScalarBO, "Number of scalar binops formed");
STATISTIC(NumScalarCmp, "Number of scalar compares formed");

static cl::opt<bool> DisableVectorCombine(
    "disable-vector-combine", cl::init(false), cl::Hidden,
    cl::desc("Disable all vector combine transforms"));

static cl::opt<bool> DisableBinopExtractShuffle(
    "disable-binop-extract-shuffle", cl::init(false), cl::Hidden,
    cl::desc("Disable binop extract to shuffle transforms"));

static cl::opt<unsigned> MaxInstrsToScan(
    "vector-combine-max-scan-instrs", cl::init(30), cl::Hidden,
    cl::desc("Max number of instructions to scan for vector combining."));

static const unsigned InvalidIndex = std::numeric_limits<unsigned>::max();

namespace {
class VectorCombine {
public:
  VectorCombine(Function &F, const TargetTransformInfo &TTI,
                const DominatorTree &DT, AAResults &AA, AssumptionCache &AC,
                bool ScalarizationOnly)
      : F(F), Builder(F.getContext()), TTI(TTI), DT(DT), AA(AA), AC(AC),
        ScalarizationOnly(ScalarizationOnly) {}

  bool run();

private:
  Function &F;
  IRBuilder<> Builder;
  const TargetTransformInfo &TTI;
  const DominatorTree &DT;
  AAResults &AA;
  AssumptionCache &AC;

  /// If true, only perform scalarization combines; do not introduce new
  /// vector operations.
  bool ScalarizationOnly;

  InstructionWorklist Worklist;

  bool vectorizeLoadInsert(Instruction &I);
  ExtractElementInst *getShuffleExtract(ExtractElementInst *Ext0,
                                        ExtractElementInst *Ext1,
                                        unsigned PreferredExtractIndex) const;
  bool isExtractExtractCheap(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                             const Instruction &I,
                             ExtractElementInst *&ConvertToShuffle,
                             unsigned PreferredExtractIndex);
  void foldExtExtCmp(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                     Instruction &I);
  void foldExtExtBinop(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                       Instruction &I);
  bool foldExtractExtract(Instruction &I);
  bool foldBitcastShuf(Instruction &I);
  bool scalarizeBinopOrCmp(Instruction &I);
  bool foldExtractedCmps(Instruction &I);
  bool foldSingleElementStore(Instruction &I);
  bool scalarizeLoadExtract(Instruction &I);
  bool foldShuffleOfBinops(Instruction &I);
  bool foldShuffleFromReductions(Instruction &I);
  bool foldSelectShuffle(Instruction &I, bool FromReduction = false);

  void replaceValue(Value &Old, Value &New) {
    Old.replaceAllUsesWith(&New);
    if (auto *NewI = dyn_cast<Instruction>(&New)) {
      New.takeName(&Old);
      Worklist.pushUsersToWorkList(*NewI);
      Worklist.pushValue(NewI);
    }
    Worklist.pushValue(&Old);
  }

  void eraseInstruction(Instruction &I) {
    for (Value *Op : I.operands())
      Worklist.pushValue(Op);
    Worklist.remove(&I);
    I.eraseFromParent();
  }
};
} // namespace

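// Widen a scalar load that only feeds an insertelement into a vector load.
// Illustrative example (value names are hypothetical):
//   %s = load float, float* %p
//   %r = insertelement <4 x float> undef, float %s, i32 0
// may become (when the wider load is safe and not more expensive):
//   %r = load <4 x float>, <4 x float>* %p
// A shuffle moves the loaded element into lane 0 if the pointer was offset.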
bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
  // Match insert into fixed vector of scalar value.
  // TODO: Handle non-zero insert index.
  auto *Ty = dyn_cast<FixedVectorType>(I.getType());
  Value *Scalar;
  if (!Ty || !match(&I, m_InsertElt(m_Undef(), m_Value(Scalar), m_ZeroInt())) ||
      !Scalar->hasOneUse())
    return false;

  // Optionally match an extract from another vector.
  Value *X;
  bool HasExtract = match(Scalar, m_ExtractElt(m_Value(X), m_ZeroInt()));
  if (!HasExtract)
    X = Scalar;

  // Match source value as load of scalar or vector.
  // Do not vectorize scalar load (widening) if atomic/volatile or under
  // asan/hwasan/memtag/tsan. The widened load may load data from dirty regions
  // or create data races non-existent in the source.
  auto *Load = dyn_cast<LoadInst>(X);
  if (!Load || !Load->isSimple() || !Load->hasOneUse() ||
      Load->getFunction()->hasFnAttribute(Attribute::SanitizeMemTag) ||
      mustSuppressSpeculation(*Load))
    return false;

  const DataLayout &DL = I.getModule()->getDataLayout();
  Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
  assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");

  unsigned AS = Load->getPointerAddressSpace();

  // We are potentially transforming byte-sized (8-bit) memory accesses, so make
  // sure we have all of our type-based constraints in place for this target.
  Type *ScalarTy = Scalar->getType();
  uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits();
  unsigned MinVectorSize = TTI.getMinVectorRegisterBitWidth();
  if (!ScalarSize || !MinVectorSize || MinVectorSize % ScalarSize != 0 ||
      ScalarSize % 8 != 0)
    return false;

  // Check safety of replacing the scalar load with a larger vector load.
  // We use minimal alignment (maximum flexibility) because we only care about
  // the dereferenceable region. When calculating cost and creating a new op,
  // we may use a larger value based on alignment attributes.
  unsigned MinVecNumElts = MinVectorSize / ScalarSize;
  auto *MinVecTy = VectorType::get(ScalarTy, MinVecNumElts, false);
  unsigned OffsetEltIndex = 0;
  Align Alignment = Load->getAlign();
  if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT)) {
    // It is not safe to load directly from the pointer, but we can still peek
    // through gep offsets and check if it is safe to load from a base address
    // with updated alignment. If it is, we can shuffle the element(s) into
    // place after loading.
    unsigned OffsetBitWidth = DL.getIndexTypeSizeInBits(SrcPtr->getType());
    APInt Offset(OffsetBitWidth, 0);
    SrcPtr = SrcPtr->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

    // We want to shuffle the result down from a high element of a vector, so
    // the offset must be positive.
    if (Offset.isNegative())
      return false;

    // The offset must be a multiple of the scalar element size so that the
    // target element shuffles cleanly into place.
    uint64_t ScalarSizeInBytes = ScalarSize / 8;
    if (Offset.urem(ScalarSizeInBytes) != 0)
      return false;

    // If we load MinVecNumElts, will our target element still be loaded?
    OffsetEltIndex = Offset.udiv(ScalarSizeInBytes).getZExtValue();
    if (OffsetEltIndex >= MinVecNumElts)
      return false;

    if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT))
      return false;

    // Update alignment with offset value. Note that the offset could be negated
    // to more accurately represent "(new) SrcPtr - Offset = (old) SrcPtr", but
    // negation does not change the result of the alignment calculation.
    Alignment = commonAlignment(Alignment, Offset.getZExtValue());
  }

  // Original pattern: insertelt undef, load [free casts of] PtrOp, 0
  // Use the greater of the alignment on the load or its source pointer.
  Alignment = std::max(SrcPtr->getPointerAlignment(DL), Alignment);
  Type *LoadTy = Load->getType();
  InstructionCost OldCost =
      TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS);
  APInt DemandedElts = APInt::getOneBitSet(MinVecNumElts, 0);
  OldCost += TTI.getScalarizationOverhead(MinVecTy, DemandedElts,
                                          /* Insert */ true, HasExtract);

  // New pattern: load VecPtr
  InstructionCost NewCost =
      TTI.getMemoryOpCost(Instruction::Load, MinVecTy, Alignment, AS);
  // Optionally, we are shuffling the loaded vector element(s) into place.
  // For the mask set everything but element 0 to undef to prevent poison from
  // propagating from the extra loaded memory. This will also optionally
  // shrink/grow the vector from the loaded size to the output size.
  // We assume this operation has no cost in codegen if there was no offset.
  // Note that we could use freeze to avoid poison problems, but then we might
  // still need a shuffle to change the vector size.
  unsigned OutputNumElts = Ty->getNumElements();
  SmallVector<int, 16> Mask(OutputNumElts, UndefMaskElem);
  assert(OffsetEltIndex < MinVecNumElts && "Address offset too big");
  Mask[0] = OffsetEltIndex;
  if (OffsetEltIndex)
    NewCost += TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, MinVecTy, Mask);

  // We can aggressively convert to the vector form because the backend can
  // invert this transform if it does not result in a performance win.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // It is safe and potentially profitable to load a vector directly:
  // inselt undef, load Scalar, 0 --> load VecPtr
  IRBuilder<> Builder(Load);
  Value *CastedPtr = Builder.CreatePointerBitCastOrAddrSpaceCast(
      SrcPtr, MinVecTy->getPointerTo(AS));
  Value *VecLd = Builder.CreateAlignedLoad(MinVecTy, CastedPtr, Alignment);
  VecLd = Builder.CreateShuffleVector(VecLd, Mask);

  replaceValue(I, *VecLd);
  ++NumVecLoad;
  return true;
}

/// Determine which, if any, of the inputs should be replaced by a shuffle
/// followed by extract from a different index.
ExtractElementInst *VectorCombine::getShuffleExtract(
    ExtractElementInst *Ext0, ExtractElementInst *Ext1,
    unsigned PreferredExtractIndex = InvalidIndex) const {
  auto *Index0C = dyn_cast<ConstantInt>(Ext0->getIndexOperand());
  auto *Index1C = dyn_cast<ConstantInt>(Ext1->getIndexOperand());
  assert(Index0C && Index1C && "Expected constant extract indexes");

  unsigned Index0 = Index0C->getZExtValue();
  unsigned Index1 = Index1C->getZExtValue();

  // If the extract indexes are identical, no shuffle is needed.
  if (Index0 == Index1)
    return nullptr;

  Type *VecTy = Ext0->getVectorOperand()->getType();
  assert(VecTy == Ext1->getVectorOperand()->getType() && "Need matching types");
  InstructionCost Cost0 =
      TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
  InstructionCost Cost1 =
      TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);

  // If both costs are invalid, no shuffle is needed.
  if (!Cost0.isValid() && !Cost1.isValid())
    return nullptr;

  // We are extracting from 2 different indexes, so one operand must be shuffled
  // before performing a vector operation and/or extract. The more expensive
  // extract will be replaced by a shuffle.
  if (Cost0 > Cost1)
    return Ext0;
  if (Cost1 > Cost0)
    return Ext1;

  // If the costs are equal and there is a preferred extract index, shuffle the
  // opposite operand.
  if (PreferredExtractIndex == Index0)
    return Ext1;
  if (PreferredExtractIndex == Index1)
    return Ext0;

  // Otherwise, replace the extract with the higher index.
  return Index0 > Index1 ? Ext0 : Ext1;
}

/// Compare the relative costs of 2 extracts followed by scalar operation vs.
/// vector operation(s) followed by extract. Return true if the existing
/// instructions are cheaper than a vector alternative. Otherwise, return false
/// and if one of the extracts should be transformed to a shufflevector, set
/// \p ConvertToShuffle to that extract instruction.
bool VectorCombine::isExtractExtractCheap(ExtractElementInst *Ext0,
                                          ExtractElementInst *Ext1,
                                          const Instruction &I,
                                          ExtractElementInst *&ConvertToShuffle,
                                          unsigned PreferredExtractIndex) {
  auto *Ext0IndexC = dyn_cast<ConstantInt>(Ext0->getOperand(1));
  auto *Ext1IndexC = dyn_cast<ConstantInt>(Ext1->getOperand(1));
  assert(Ext0IndexC && Ext1IndexC && "Expected constant extract indexes");

  unsigned Opcode = I.getOpcode();
  Type *ScalarTy = Ext0->getType();
  auto *VecTy = cast<VectorType>(Ext0->getOperand(0)->getType());
  InstructionCost ScalarOpCost, VectorOpCost;

  // Get cost estimates for scalar and vector versions of the operation.
  bool IsBinOp = Instruction::isBinaryOp(Opcode);
  if (IsBinOp) {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  } else {
    assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
           "Expected a compare");
    CmpInst::Predicate Pred = cast<CmpInst>(I).getPredicate();
    ScalarOpCost = TTI.getCmpSelInstrCost(
        Opcode, ScalarTy, CmpInst::makeCmpResultType(ScalarTy), Pred);
    VectorOpCost = TTI.getCmpSelInstrCost(
        Opcode, VecTy, CmpInst::makeCmpResultType(VecTy), Pred);
  }

  // Get cost estimates for the extract elements. These costs will factor into
  // both sequences.
  unsigned Ext0Index = Ext0IndexC->getZExtValue();
  unsigned Ext1Index = Ext1IndexC->getZExtValue();

  InstructionCost Extract0Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext0Index);
  InstructionCost Extract1Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext1Index);

  // A more expensive extract will always be replaced by a splat shuffle.
  // For example, if Ext0 is more expensive:
  // opcode (extelt V0, Ext0), (ext V1, Ext1) -->
  // extelt (opcode (splat V0, Ext0), V1), Ext1
  // TODO: Evaluate whether that always results in lowest cost. Alternatively,
  //       check the cost of creating a broadcast shuffle and shuffling both
  //       operands to element 0.
  InstructionCost CheapExtractCost = std::min(Extract0Cost, Extract1Cost);

  // Extra uses of the extracts mean that we include those costs in the
  // vector total because those instructions will not be eliminated.
  InstructionCost OldCost, NewCost;
  if (Ext0->getOperand(0) == Ext1->getOperand(0) && Ext0Index == Ext1Index) {
    // Handle a special case. If the 2 extracts are identical, adjust the
    // formulas to account for that. The extra use charge allows for either the
    // CSE'd pattern or an unoptimized form with identical values:
    // opcode (extelt V, C), (extelt V, C) --> extelt (opcode V, V), C
    bool HasUseTax = Ext0 == Ext1 ? !Ext0->hasNUses(2)
                                  : !Ext0->hasOneUse() || !Ext1->hasOneUse();
    OldCost = CheapExtractCost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost + HasUseTax * CheapExtractCost;
  } else {
    // Handle the general case. Each extract is actually a different value:
    // opcode (extelt V0, C0), (extelt V1, C1) --> extelt (opcode V0, V1), C
    OldCost = Extract0Cost + Extract1Cost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost +
              !Ext0->hasOneUse() * Extract0Cost +
              !Ext1->hasOneUse() * Extract1Cost;
  }

  ConvertToShuffle = getShuffleExtract(Ext0, Ext1, PreferredExtractIndex);
  if (ConvertToShuffle) {
    if (IsBinOp && DisableBinopExtractShuffle)
      return true;

    // If we are extracting from 2 different indexes, then one operand must be
    // shuffled before performing the vector operation. The shuffle mask is
    // undefined except for 1 lane that is being translated to the remaining
    // extraction lane. Therefore, it is a splat shuffle. Ex:
    // ShufMask = { undef, undef, 0, undef }
    // TODO: The cost model has an option for a "broadcast" shuffle
    //       (splat-from-element-0), but no option for a more general splat.
    NewCost +=
        TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
  }

  // Aggressively form a vector op if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  return OldCost < NewCost;
}

/// Create a shuffle that translates (shifts) 1 element from the input vector
/// to a new element location.
static Value *createShiftShuffle(Value *Vec, unsigned OldIndex,
                                 unsigned NewIndex, IRBuilder<> &Builder) {
  // The shuffle mask is undefined except for 1 lane that is being translated
  // to the new element index. Example for OldIndex == 2 and NewIndex == 0:
  // ShufMask = { 2, undef, undef, undef }
  auto *VecTy = cast<FixedVectorType>(Vec->getType());
  SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem);
  ShufMask[NewIndex] = OldIndex;
  return Builder.CreateShuffleVector(Vec, ShufMask, "shift");
}

/// Given an extract element instruction with constant index operand, shuffle
/// the source vector (shift the scalar element) to a NewIndex for extraction.
/// Return null if the input can be constant folded, so that we are not creating
/// unnecessary instructions.
static ExtractElementInst *translateExtract(ExtractElementInst *ExtElt,
                                            unsigned NewIndex,
                                            IRBuilder<> &Builder) {
  // If the extract can be constant-folded, this code is unsimplified. Defer
  // to other passes to handle that.
  Value *X = ExtElt->getVectorOperand();
  Value *C = ExtElt->getIndexOperand();
  assert(isa<ConstantInt>(C) && "Expected a constant index operand");
  if (isa<Constant>(X))
    return nullptr;

  Value *Shuf = createShiftShuffle(X, cast<ConstantInt>(C)->getZExtValue(),
                                   NewIndex, Builder);
  return cast<ExtractElementInst>(Builder.CreateExtractElement(Shuf, NewIndex));
}

/// Try to reduce extract element costs by converting scalar compares to vector
/// compares followed by extract.
/// cmp (ext0 V0, C), (ext1 V1, C)
void VectorCombine::foldExtExtCmp(ExtractElementInst *Ext0,
                                  ExtractElementInst *Ext1, Instruction &I) {
  assert(isa<CmpInst>(&I) && "Expected a compare");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // cmp Pred (extelt V0, C), (extelt V1, C) --> extelt (cmp Pred V0, V1), C
  ++NumVecCmp;
  CmpInst::Predicate Pred = cast<CmpInst>(&I)->getPredicate();
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecCmp = Builder.CreateCmp(Pred, V0, V1);
  Value *NewExt = Builder.CreateExtractElement(VecCmp, Ext0->getIndexOperand());
  replaceValue(I, *NewExt);
}

/// Try to reduce extract element costs by converting scalar binops to vector
/// binops followed by extract.
/// bo (ext0 V0, C), (ext1 V1, C)
void VectorCombine::foldExtExtBinop(ExtractElementInst *Ext0,
                                    ExtractElementInst *Ext1, Instruction &I) {
  assert(isa<BinaryOperator>(&I) && "Expected a binary operator");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // bo (extelt V0, C), (extelt V1, C) --> extelt (bo V0, V1), C
  ++NumVecBO;
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecBO =
      Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), V0, V1);

  // All IR flags are safe to back-propagate because any potential poison
  // created in unused vector elements is discarded by the extract.
  if (auto *VecBOInst = dyn_cast<Instruction>(VecBO))
    VecBOInst->copyIRFlags(&I);

  Value *NewExt = Builder.CreateExtractElement(VecBO, Ext0->getIndexOperand());
  replaceValue(I, *NewExt);
}

/// Match an instruction with extracted vector operands.
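/// For example (illustrative values):
///   %e0 = extractelement <4 x float> %x, i32 0
///   %e1 = extractelement <4 x float> %y, i32 0
///   %r = fadd float %e0, %e1
/// may become:
///   %v = fadd <4 x float> %x, %y
///   %r = extractelement <4 x float> %v, i32 0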
bool VectorCombine::foldExtractExtract(Instruction &I) {
  // It is not safe to transform things like div, urem, etc. because we may
  // create undefined behavior when executing those on unknown vector elements.
  if (!isSafeToSpeculativelyExecute(&I))
    return false;

  Instruction *I0, *I1;
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!match(&I, m_Cmp(Pred, m_Instruction(I0), m_Instruction(I1))) &&
      !match(&I, m_BinOp(m_Instruction(I0), m_Instruction(I1))))
    return false;

  Value *V0, *V1;
  uint64_t C0, C1;
  if (!match(I0, m_ExtractElt(m_Value(V0), m_ConstantInt(C0))) ||
      !match(I1, m_ExtractElt(m_Value(V1), m_ConstantInt(C1))) ||
      V0->getType() != V1->getType())
    return false;

  // If the scalar value 'I' is going to be re-inserted into a vector, then try
  // to create an extract to that same element. The extract/insert can be
  // reduced to a "select shuffle".
  // TODO: If we add a larger pattern match that starts from an insert, this
  //       probably becomes unnecessary.
  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  uint64_t InsertIndex = InvalidIndex;
  if (I.hasOneUse())
    match(I.user_back(),
          m_InsertElt(m_Value(), m_Value(), m_ConstantInt(InsertIndex)));

  ExtractElementInst *ExtractToChange;
  if (isExtractExtractCheap(Ext0, Ext1, I, ExtractToChange, InsertIndex))
    return false;

  if (ExtractToChange) {
    unsigned CheapExtractIdx = ExtractToChange == Ext0 ? C1 : C0;
    ExtractElementInst *NewExtract =
        translateExtract(ExtractToChange, CheapExtractIdx, Builder);
    if (!NewExtract)
      return false;
    if (ExtractToChange == Ext0)
      Ext0 = NewExtract;
    else
      Ext1 = NewExtract;
  }

  if (Pred != CmpInst::BAD_ICMP_PREDICATE)
    foldExtExtCmp(Ext0, Ext1, I);
  else
    foldExtExtBinop(Ext0, Ext1, I);

  Worklist.push(Ext0);
  Worklist.push(Ext1);
  return true;
}

/// If this is a bitcast of a shuffle, try to bitcast the source vector to the
/// destination type followed by shuffle. This can enable further transforms by
/// moving bitcasts or shuffles together.
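/// For example (illustrative values):
///   %s = shufflevector <2 x i64> %v, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
///   %b = bitcast <2 x i64> %s to <4 x i32>
/// may become:
///   %b = bitcast <2 x i64> %v to <4 x i32>
///   %s = shufflevector <4 x i32> %b, <4 x i32> undef,
///        <4 x i32> <i32 2, i32 3, i32 0, i32 1>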
bool VectorCombine::foldBitcastShuf(Instruction &I) {
  Value *V;
  ArrayRef<int> Mask;
  if (!match(&I, m_BitCast(
                     m_OneUse(m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))))))
    return false;

  // 1) Do not fold a bitcast of a shuffle for a scalable type. First, the
  // shuffle cost for a scalable type is unknown; second, we cannot reason
  // about whether the narrowed shuffle mask for a scalable type is a splat.
  // 2) Disallow non-vector casts and length-changing shuffles.
  // TODO: We could allow any shuffle.
  auto *DestTy = dyn_cast<FixedVectorType>(I.getType());
  auto *SrcTy = dyn_cast<FixedVectorType>(V->getType());
  if (!SrcTy || !DestTy || I.getOperand(0)->getType() != SrcTy)
    return false;

  unsigned DestNumElts = DestTy->getNumElements();
  unsigned SrcNumElts = SrcTy->getNumElements();
  SmallVector<int, 16> NewMask;
  if (SrcNumElts <= DestNumElts) {
    // The bitcast is from wide to narrow/equal elements. The shuffle mask can
    // always be expanded to the equivalent form choosing narrower elements.
    assert(DestNumElts % SrcNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = DestNumElts / SrcNumElts;
    narrowShuffleMaskElts(ScaleFactor, Mask, NewMask);
  } else {
    // The bitcast is from narrow elements to wide elements. The shuffle mask
    // must choose consecutive elements to allow casting first.
    assert(SrcNumElts % DestNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = SrcNumElts / DestNumElts;
    if (!widenShuffleMaskElts(ScaleFactor, Mask, NewMask))
      return false;
  }

  // The new shuffle must not cost more than the old shuffle. The bitcast is
  // moved ahead of the shuffle, so assume that it has the same cost as before.
  InstructionCost DestCost = TTI.getShuffleCost(
      TargetTransformInfo::SK_PermuteSingleSrc, DestTy, NewMask);
  InstructionCost SrcCost =
      TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, SrcTy, Mask);
  if (DestCost > SrcCost || !DestCost.isValid())
    return false;

  // bitcast (shuf V, MaskC) --> shuf (bitcast V), MaskC'
  ++NumShufOfBitcast;
  Value *CastV = Builder.CreateBitCast(V, DestTy);
  Value *Shuf = Builder.CreateShuffleVector(CastV, NewMask);
  replaceValue(I, *Shuf);
  return true;
}

/// Match a vector binop or compare instruction with at least one inserted
/// scalar operand and convert to scalar binop/cmp followed by insertelement.
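/// For example (illustrative values):
///   %i = insertelement <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i32 %x, i32 0
///   %r = add <4 x i32> %i, <i32 5, i32 6, i32 7, i32 8>
/// may become:
///   %s = add i32 %x, 5
///   %r = insertelement <4 x i32> <i32 6, i32 8, i32 10, i32 12>, i32 %s, i32 0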
bool VectorCombine::scalarizeBinopOrCmp(Instruction &I) {
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  Value *Ins0, *Ins1;
  if (!match(&I, m_BinOp(m_Value(Ins0), m_Value(Ins1))) &&
      !match(&I, m_Cmp(Pred, m_Value(Ins0), m_Value(Ins1))))
    return false;

  // Do not convert the vector condition of a vector select into a scalar
  // condition. That may cause problems for codegen because of differences in
  // boolean formats and register-file transfers.
  // TODO: Can we account for that in the cost model?
  bool IsCmp = Pred != CmpInst::Predicate::BAD_ICMP_PREDICATE;
  if (IsCmp)
    for (User *U : I.users())
      if (match(U, m_Select(m_Specific(&I), m_Value(), m_Value())))
        return false;

  // Match against one or both scalar values being inserted into constant
  // vectors:
  // vec_op VecC0, (inselt VecC1, V1, Index)
  // vec_op (inselt VecC0, V0, Index), VecC1
  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index)
  // TODO: Deal with mismatched index constants and variable indexes?
  Constant *VecC0 = nullptr, *VecC1 = nullptr;
  Value *V0 = nullptr, *V1 = nullptr;
  uint64_t Index0 = 0, Index1 = 0;
  if (!match(Ins0, m_InsertElt(m_Constant(VecC0), m_Value(V0),
                               m_ConstantInt(Index0))) &&
      !match(Ins0, m_Constant(VecC0)))
    return false;
  if (!match(Ins1, m_InsertElt(m_Constant(VecC1), m_Value(V1),
                               m_ConstantInt(Index1))) &&
      !match(Ins1, m_Constant(VecC1)))
    return false;

  bool IsConst0 = !V0;
  bool IsConst1 = !V1;
  if (IsConst0 && IsConst1)
    return false;
  if (!IsConst0 && !IsConst1 && Index0 != Index1)
    return false;

  // Bail for single insertion if it is a load.
  // TODO: Handle this once getVectorInstrCost can cost for load/stores.
  auto *I0 = dyn_cast_or_null<Instruction>(V0);
  auto *I1 = dyn_cast_or_null<Instruction>(V1);
  if ((IsConst0 && I1 && I1->mayReadFromMemory()) ||
      (IsConst1 && I0 && I0->mayReadFromMemory()))
    return false;

  uint64_t Index = IsConst0 ? Index1 : Index0;
  Type *ScalarTy = IsConst0 ? V1->getType() : V0->getType();
  Type *VecTy = I.getType();
  assert(VecTy->isVectorTy() &&
         (IsConst0 || IsConst1 || V0->getType() == V1->getType()) &&
         (ScalarTy->isIntegerTy() || ScalarTy->isFloatingPointTy() ||
          ScalarTy->isPointerTy()) &&
         "Unexpected types for insert element into binop or cmp");

  unsigned Opcode = I.getOpcode();
  InstructionCost ScalarOpCost, VectorOpCost;
  if (IsCmp) {
    CmpInst::Predicate Pred = cast<CmpInst>(I).getPredicate();
    ScalarOpCost = TTI.getCmpSelInstrCost(
        Opcode, ScalarTy, CmpInst::makeCmpResultType(ScalarTy), Pred);
    VectorOpCost = TTI.getCmpSelInstrCost(
        Opcode, VecTy, CmpInst::makeCmpResultType(VecTy), Pred);
  } else {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  }

  // Get cost estimate for the insert element. This cost will factor into
  // both sequences.
  InstructionCost InsertCost =
      TTI.getVectorInstrCost(Instruction::InsertElement, VecTy, Index);
  InstructionCost OldCost =
      (IsConst0 ? 0 : InsertCost) + (IsConst1 ? 0 : InsertCost) + VectorOpCost;
  InstructionCost NewCost = ScalarOpCost + InsertCost +
                            (IsConst0 ? 0 : !Ins0->hasOneUse() * InsertCost) +
                            (IsConst1 ? 0 : !Ins1->hasOneUse() * InsertCost);

  // We want to scalarize unless the vector variant actually has lower cost.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index) -->
  // inselt NewVecC, (scalar_op V0, V1), Index
  if (IsCmp)
    ++NumScalarCmp;
  else
    ++NumScalarBO;

  // For constant cases, extract the scalar element; this should constant fold.
  if (IsConst0)
    V0 = ConstantExpr::getExtractElement(VecC0, Builder.getInt64(Index));
  if (IsConst1)
    V1 = ConstantExpr::getExtractElement(VecC1, Builder.getInt64(Index));

  Value *Scalar =
      IsCmp ? Builder.CreateCmp(Pred, V0, V1)
            : Builder.CreateBinOp((Instruction::BinaryOps)Opcode, V0, V1);

  Scalar->setName(I.getName() + ".scalar");

  // All IR flags are safe to back-propagate. There is no potential for extra
  // poison to be created by the scalar instruction.
  if (auto *ScalarInst = dyn_cast<Instruction>(Scalar))
    ScalarInst->copyIRFlags(&I);

  // Fold the vector constants in the original vectors into a new base vector.
  Value *NewVecC =
      IsCmp ? Builder.CreateCmp(Pred, VecC0, VecC1)
            : Builder.CreateBinOp((Instruction::BinaryOps)Opcode, VecC0, VecC1);
  Value *Insert = Builder.CreateInsertElement(NewVecC, Scalar, Index);
  replaceValue(I, *Insert);
  return true;
}

/// Try to combine a scalar binop + 2 scalar compares of extracted elements of
/// a vector into vector operations followed by extract. Note: The SLP pass
/// may miss this pattern because of implementation problems.
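/// For example (illustrative values):
///   %e0 = extractelement <4 x i32> %x, i32 0
///   %e1 = extractelement <4 x i32> %x, i32 3
///   %c0 = icmp sgt i32 %e0, 42
///   %c1 = icmp sgt i32 %e1, 7
///   %r = and i1 %c0, %c1
/// may become a single vector compare plus a shuffle, a vector binop, and one
/// extract.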
bool VectorCombine::foldExtractedCmps(Instruction &I) {
  // We are looking for a scalar binop of booleans.
  // binop i1 (cmp Pred I0, C0), (cmp Pred I1, C1)
  if (!I.isBinaryOp() || !I.getType()->isIntegerTy(1))
    return false;

  // The compare predicates should match, and each compare should have a
  // constant operand.
  // TODO: Relax the one-use constraints.
  Value *B0 = I.getOperand(0), *B1 = I.getOperand(1);
  Instruction *I0, *I1;
  Constant *C0, *C1;
  CmpInst::Predicate P0, P1;
  if (!match(B0, m_OneUse(m_Cmp(P0, m_Instruction(I0), m_Constant(C0)))) ||
      !match(B1, m_OneUse(m_Cmp(P1, m_Instruction(I1), m_Constant(C1)))) ||
      P0 != P1)
    return false;

  // The compare operands must be extracts of the same vector with constant
  // extract indexes.
  // TODO: Relax the one-use constraints.
  Value *X;
  uint64_t Index0, Index1;
  if (!match(I0, m_OneUse(m_ExtractElt(m_Value(X), m_ConstantInt(Index0)))) ||
      !match(I1, m_OneUse(m_ExtractElt(m_Specific(X), m_ConstantInt(Index1)))))
    return false;

  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  ExtractElementInst *ConvertToShuf = getShuffleExtract(Ext0, Ext1);
  if (!ConvertToShuf)
    return false;

  // The original scalar pattern is:
  // binop i1 (cmp Pred (ext X, Index0), C0), (cmp Pred (ext X, Index1), C1)
  CmpInst::Predicate Pred = P0;
  unsigned CmpOpcode = CmpInst::isFPPredicate(Pred) ? Instruction::FCmp
                                                    : Instruction::ICmp;
  auto *VecTy = dyn_cast<FixedVectorType>(X->getType());
  if (!VecTy)
    return false;

  InstructionCost OldCost =
      TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
  OldCost += TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);
  OldCost +=
      TTI.getCmpSelInstrCost(CmpOpcode, I0->getType(),
                             CmpInst::makeCmpResultType(I0->getType()), Pred) *
      2;
  OldCost += TTI.getArithmeticInstrCost(I.getOpcode(), I.getType());

  // The proposed vector pattern is:
  // vcmp = cmp Pred X, VecC
  // ext (binop vNi1 vcmp, (shuffle vcmp, Index1)), Index0
  int CheapIndex = ConvertToShuf == Ext0 ? Index1 : Index0;
  int ExpensiveIndex = ConvertToShuf == Ext0 ? Index0 : Index1;
  auto *CmpTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(X->getType()));
  InstructionCost NewCost = TTI.getCmpSelInstrCost(
      CmpOpcode, X->getType(), CmpInst::makeCmpResultType(X->getType()), Pred);
  SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem);
  ShufMask[CheapIndex] = ExpensiveIndex;
  NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, CmpTy,
                                ShufMask);
  NewCost += TTI.getArithmeticInstrCost(I.getOpcode(), CmpTy);
  NewCost += TTI.getVectorInstrCost(Ext0->getOpcode(), CmpTy, CheapIndex);

  // Aggressively form vector ops if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // Create a vector constant from the 2 scalar constants.
  SmallVector<Constant *, 32> CmpC(VecTy->getNumElements(),
                                   UndefValue::get(VecTy->getElementType()));
  CmpC[Index0] = C0;
  CmpC[Index1] = C1;
  Value *VCmp = Builder.CreateCmp(Pred, X, ConstantVector::get(CmpC));

  Value *Shuf = createShiftShuffle(VCmp, ExpensiveIndex, CheapIndex, Builder);
  Value *VecLogic = Builder.CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                        VCmp, Shuf);
  Value *NewExt = Builder.CreateExtractElement(VecLogic, CheapIndex);
  replaceValue(I, *NewExt);
  ++NumVecCmpBO;
  return true;
}

// Check if the memory location is modified between two instructions in the
// same basic block.
static bool isMemModifiedBetween(BasicBlock::iterator Begin,
                                 BasicBlock::iterator End,
                                 const MemoryLocation &Loc, AAResults &AA) {
  unsigned NumScanned = 0;
  return std::any_of(Begin, End, [&](const Instruction &Instr) {
    return isModSet(AA.getModRefInfo(&Instr, Loc)) ||
           ++NumScanned > MaxInstrsToScan;
  });
}

/// Helper class to indicate whether a vector index can be safely scalarized and
/// if a freeze needs to be inserted.
class ScalarizationResult {
  enum class StatusTy { Unsafe, Safe, SafeWithFreeze };

  StatusTy Status;
  Value *ToFreeze;

  ScalarizationResult(StatusTy Status, Value *ToFreeze = nullptr)
      : Status(Status), ToFreeze(ToFreeze) {}

public:
  ScalarizationResult(const ScalarizationResult &Other) = default;
  ~ScalarizationResult() {
    assert(!ToFreeze && "freeze() not called with ToFreeze being set");
  }

  static ScalarizationResult unsafe() { return {StatusTy::Unsafe}; }
  static ScalarizationResult safe() { return {StatusTy::Safe}; }
  static ScalarizationResult safeWithFreeze(Value *ToFreeze) {
    return {StatusTy::SafeWithFreeze, ToFreeze};
  }

  /// Returns true if the index can be scalarized without requiring a freeze.
  bool isSafe() const { return Status == StatusTy::Safe; }
  /// Returns true if the index cannot be scalarized.
  bool isUnsafe() const { return Status == StatusTy::Unsafe; }
  /// Returns true if the index can be scalarized, but requires inserting a
  /// freeze.
  bool isSafeWithFreeze() const { return Status == StatusTy::SafeWithFreeze; }

  /// Reset the state to Unsafe and clear ToFreeze if set.
  void discard() {
    ToFreeze = nullptr;
    Status = StatusTy::Unsafe;
  }

  /// Freeze the ToFreeze value and update the use in \p UserI to use it.
  void freeze(IRBuilder<> &Builder, Instruction &UserI) {
    assert(isSafeWithFreeze() &&
           "should only be used when freezing is required");
    assert(is_contained(ToFreeze->users(), &UserI) &&
           "UserI must be a user of ToFreeze");
    IRBuilder<>::InsertPointGuard Guard(Builder);
    Builder.SetInsertPoint(cast<Instruction>(&UserI));
    Value *Frozen =
        Builder.CreateFreeze(ToFreeze, ToFreeze->getName() + ".frozen");
    for (Use &U : make_early_inc_range(UserI.operands()))
      if (U.get() == ToFreeze)
        U.set(Frozen);

    ToFreeze = nullptr;
  }
};

/// Check if it is legal to scalarize a memory access to \p VecTy at index \p
/// Idx. \p Idx must access a valid vector element.
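/// For example (illustrative), an index computed as "and i64 %j, 3" lies in
/// [0, 3] for any non-poison %j, so a 4-element access can be scalarized once
/// %j is frozen to rule out poison.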
static ScalarizationResult canScalarizeAccess(FixedVectorType *VecTy,
                                              Value *Idx, Instruction *CtxI,
                                              AssumptionCache &AC,
                                              const DominatorTree &DT) {
  if (auto *C = dyn_cast<ConstantInt>(Idx)) {
    if (C->getValue().ult(VecTy->getNumElements()))
      return ScalarizationResult::safe();
    return ScalarizationResult::unsafe();
  }

  unsigned IntWidth = Idx->getType()->getScalarSizeInBits();
  APInt Zero(IntWidth, 0);
  APInt MaxElts(IntWidth, VecTy->getNumElements());
  ConstantRange ValidIndices(Zero, MaxElts);
  ConstantRange IdxRange(IntWidth, true);

  if (isGuaranteedNotToBePoison(Idx, &AC)) {
    if (ValidIndices.contains(computeConstantRange(Idx, /* ForSigned */ false,
                                                   true, &AC, CtxI, &DT)))
      return ScalarizationResult::safe();
    return ScalarizationResult::unsafe();
  }

  // If the index may be poison, check if we can insert a freeze before the
  // range of the index is restricted.
  Value *IdxBase;
  ConstantInt *CI;
  if (match(Idx, m_And(m_Value(IdxBase), m_ConstantInt(CI)))) {
    IdxRange = IdxRange.binaryAnd(CI->getValue());
  } else if (match(Idx, m_URem(m_Value(IdxBase), m_ConstantInt(CI)))) {
    IdxRange = IdxRange.urem(CI->getValue());
  }

  if (ValidIndices.contains(IdxRange))
    return ScalarizationResult::safeWithFreeze(IdxBase);
  return ScalarizationResult::unsafe();
}

/// The memory operation on a vector of \p ScalarType had alignment of
/// \p VectorAlignment. Compute the maximal, but conservatively correct,
/// alignment that will be valid for the memory operation on a single scalar
/// element of the same type with index \p Idx.
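/// For example (illustrative), a <4 x i32> access with VectorAlignment = 16
/// and Idx = 1 yields commonAlignment(16, 1 * 4) = 4 for the scalar element.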
static Align computeAlignmentAfterScalarization(Align VectorAlignment,
                                                Type *ScalarType, Value *Idx,
                                                const DataLayout &DL) {
  if (auto *C = dyn_cast<ConstantInt>(Idx))
    return commonAlignment(VectorAlignment,
                           C->getZExtValue() * DL.getTypeStoreSize(ScalarType));
  return commonAlignment(VectorAlignment, DL.getTypeStoreSize(ScalarType));
}

// Combine patterns like:
//   %0 = load <4 x i32>, <4 x i32>* %a
//   %1 = insertelement <4 x i32> %0, i32 %b, i32 1
//   store <4 x i32> %1, <4 x i32>* %a
// to:
//   %0 = bitcast <4 x i32>* %a to i32*
//   %1 = getelementptr inbounds i32, i32* %0, i64 0, i64 1
//   store i32 %b, i32* %1
bool VectorCombine::foldSingleElementStore(Instruction &I) {
  StoreInst *SI = dyn_cast<StoreInst>(&I);
  if (!SI || !SI->isSimple() ||
      !isa<FixedVectorType>(SI->getValueOperand()->getType()))
    return false;

  // TODO: Combine more complicated patterns (multiple insert) by referencing
  // TargetTransformInfo.
  Instruction *Source;
  Value *NewElement;
  Value *Idx;
  if (!match(SI->getValueOperand(),
             m_InsertElt(m_Instruction(Source), m_Value(NewElement),
                         m_Value(Idx))))
    return false;

  if (auto *Load = dyn_cast<LoadInst>(Source)) {
    auto VecTy = cast<FixedVectorType>(SI->getValueOperand()->getType());
    const DataLayout &DL = I.getModule()->getDataLayout();
    Value *SrcAddr = Load->getPointerOperand()->stripPointerCasts();
    // Don't optimize for atomic/volatile load or store. Ensure memory is not
    // modified between the load and the store, the vector type matches the
    // store size, and the index is in bounds.
    if (!Load->isSimple() || Load->getParent() != SI->getParent() ||
        !DL.typeSizeEqualsStoreSize(Load->getType()) ||
        SrcAddr != SI->getPointerOperand()->stripPointerCasts())
      return false;

    auto ScalarizableIdx = canScalarizeAccess(VecTy, Idx, Load, AC, DT);
    if (ScalarizableIdx.isUnsafe() ||
        isMemModifiedBetween(Load->getIterator(), SI->getIterator(),
                             MemoryLocation::get(SI), AA))
      return false;

    if (ScalarizableIdx.isSafeWithFreeze())
      ScalarizableIdx.freeze(Builder, *cast<Instruction>(Idx));
    Value *GEP = Builder.CreateInBoundsGEP(
        SI->getValueOperand()->getType(), SI->getPointerOperand(),
        {ConstantInt::get(Idx->getType(), 0), Idx});
    StoreInst *NSI = Builder.CreateStore(NewElement, GEP);
    NSI->copyMetadata(*SI);
    Align ScalarOpAlignment = computeAlignmentAfterScalarization(
        std::max(SI->getAlign(), Load->getAlign()), NewElement->getType(), Idx,
        DL);
    NSI->setAlignment(ScalarOpAlignment);
    replaceValue(I, *NSI);
    eraseInstruction(I);
    return true;
  }

  return false;
}

/// Try to scalarize vector loads feeding extractelement instructions.
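/// For example (illustrative values):
///   %v = load <4 x i32>, <4 x i32>* %p
///   %e = extractelement <4 x i32> %v, i32 %idx
/// may become:
///   %g = getelementptr inbounds <4 x i32>, <4 x i32>* %p, i32 0, i32 %idx
///   %e = load i32, i32* %g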
bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
  Value *Ptr;
  if (!match(&I, m_Load(m_Value(Ptr))))
    return false;

  auto *LI = cast<LoadInst>(&I);
  const DataLayout &DL = I.getModule()->getDataLayout();
  if (LI->isVolatile() || !DL.typeSizeEqualsStoreSize(LI->getType()))
    return false;

  auto *FixedVT = dyn_cast<FixedVectorType>(LI->getType());
  if (!FixedVT)
    return false;

  InstructionCost OriginalCost =
      TTI.getMemoryOpCost(Instruction::Load, LI->getType(), LI->getAlign(),
                          LI->getPointerAddressSpace());
  InstructionCost ScalarizedCost = 0;

  Instruction *LastCheckedInst = LI;
  unsigned NumInstChecked = 0;
  // Check if all users of the load are extracts with no memory modifications
  // between the load and the extract. Compute the cost of both the original
  // code and the scalarized version.
  for (User *U : LI->users()) {
    auto *UI = dyn_cast<ExtractElementInst>(U);
    if (!UI || UI->getParent() != LI->getParent())
      return false;

    if (!isGuaranteedNotToBePoison(UI->getOperand(1), &AC, LI, &DT))
      return false;

    // Check if any instruction between the load and the extract may modify
    // memory.
    if (LastCheckedInst->comesBefore(UI)) {
      for (Instruction &I :
           make_range(std::next(LI->getIterator()), UI->getIterator())) {
        // Bail out if we reached the check limit or the instruction may write
        // to memory.
        if (NumInstChecked == MaxInstrsToScan || I.mayWriteToMemory())
          return false;
        NumInstChecked++;
      }
      LastCheckedInst = UI;
    }

    auto ScalarIdx = canScalarizeAccess(FixedVT, UI->getOperand(1), &I, AC, DT);
    if (!ScalarIdx.isSafe()) {
      // TODO: Freeze index if it is safe to do so.
      ScalarIdx.discard();
      return false;
    }

    auto *Index = dyn_cast<ConstantInt>(UI->getOperand(1));
    OriginalCost +=
        TTI.getVectorInstrCost(Instruction::ExtractElement, LI->getType(),
                               Index ? Index->getZExtValue() : -1);
    ScalarizedCost +=
        TTI.getMemoryOpCost(Instruction::Load, FixedVT->getElementType(),
                            Align(1), LI->getPointerAddressSpace());
    ScalarizedCost += TTI.getAddressComputationCost(FixedVT->getElementType());
  }

  if (ScalarizedCost >= OriginalCost)
    return false;

  // Replace extracts with narrow scalar loads.
  for (User *U : LI->users()) {
    auto *EI = cast<ExtractElementInst>(U);
    Builder.SetInsertPoint(EI);

    Value *Idx = EI->getOperand(1);
    Value *GEP =
        Builder.CreateInBoundsGEP(FixedVT, Ptr, {Builder.getInt32(0), Idx});
    auto *NewLoad = cast<LoadInst>(Builder.CreateLoad(
        FixedVT->getElementType(), GEP, EI->getName() + ".scalar"));

    Align ScalarOpAlignment = computeAlignmentAfterScalarization(
        LI->getAlign(), FixedVT->getElementType(), Idx, DL);
    NewLoad->setAlignment(ScalarOpAlignment);

    replaceValue(*EI, *NewLoad);
  }

  return true;
}

/// Try to convert "shuffle (binop), (binop)" with a shared binop operand into
/// "binop (shuffle), (shuffle)".
bool VectorCombine::foldShuffleOfBinops(Instruction &I) {
  auto *VecTy = dyn_cast<FixedVectorType>(I.getType());
  if (!VecTy)
    return false;

  BinaryOperator *B0, *B1;
  ArrayRef<int> Mask;
  if (!match(&I, m_Shuffle(m_OneUse(m_BinOp(B0)), m_OneUse(m_BinOp(B1)),
                           m_Mask(Mask))) ||
      B0->getOpcode() != B1->getOpcode() || B0->getType() != VecTy)
    return false;

  // Try to replace a binop with a shuffle if the shuffle is not costly.
  // The new shuffle will choose from a single, common operand, so it may be
  // cheaper than the existing two-operand shuffle.
  SmallVector<int> UnaryMask = createUnaryMask(Mask, Mask.size());
  Instruction::BinaryOps Opcode = B0->getOpcode();
  InstructionCost BinopCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  InstructionCost ShufCost = TTI.getShuffleCost(
      TargetTransformInfo::SK_PermuteSingleSrc, VecTy, UnaryMask);
  if (ShufCost > BinopCost)
    return false;

  // If we have something like "add X, Y" and "add Z, X", swap ops to match.
  Value *X = B0->getOperand(0), *Y = B0->getOperand(1);
  Value *Z = B1->getOperand(0), *W = B1->getOperand(1);
  if (BinaryOperator::isCommutative(Opcode) && X != Z && Y != W)
    std::swap(X, Y);

  Value *Shuf0, *Shuf1;
  if (X == Z) {
    // shuf (bo X, Y), (bo X, W) --> bo (shuf X), (shuf Y, W)
    Shuf0 = Builder.CreateShuffleVector(X, UnaryMask);
    Shuf1 = Builder.CreateShuffleVector(Y, W, Mask);
  } else if (Y == W) {
    // shuf (bo X, Y), (bo Z, Y) --> bo (shuf X, Z), (shuf Y)
    Shuf0 = Builder.CreateShuffleVector(X, Z, Mask);
    Shuf1 = Builder.CreateShuffleVector(Y, UnaryMask);
  } else {
    return false;
  }

  Value *NewBO = Builder.CreateBinOp(Opcode, Shuf0, Shuf1);
  // Intersect flags from the old binops.
  if (auto *NewInst = dyn_cast<Instruction>(NewBO)) {
    NewInst->copyIRFlags(B0);
    NewInst->andIRFlags(B1);
  }
  replaceValue(I, *NewBO);
  return true;
}

/// Given a commutative reduction, the order of the input lanes does not alter
/// the results. We can use this to remove certain shuffles feeding the
/// reduction, removing the need to shuffle at all.
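/// For example (illustrative values):
///   %s = shufflevector <4 x i32> %x, <4 x i32> undef,
///        <4 x i32> <i32 3, i32 2, i32 1, i32 0>
///   %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
/// can drop the shuffle, because the add reduction is lane-order invariant:
///   %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %x)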
bool VectorCombine::foldShuffleFromReductions(Instruction &I) {
  auto *II = dyn_cast<IntrinsicInst>(&I);
  if (!II)
    return false;
  switch (II->getIntrinsicID()) {
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
    break;
  default:
    return false;
  }

  // Find all the inputs when looking through operations that do not alter the
  // lane order (binops, for example). Currently we look for a single shuffle,
  // and can ignore splat values.
  std::queue<Value *> Worklist;
  SmallPtrSet<Value *, 4> Visited;
  ShuffleVectorInst *Shuffle = nullptr;
  if (auto *Op = dyn_cast<Instruction>(I.getOperand(0)))
    Worklist.push(Op);

  while (!Worklist.empty()) {
    Value *CV = Worklist.front();
    Worklist.pop();
    if (Visited.contains(CV))
      continue;

    // Splats don't change the order, so can be safely ignored.
    if (isSplatValue(CV))
      continue;

    Visited.insert(CV);

    if (auto *CI = dyn_cast<Instruction>(CV)) {
      if (CI->isBinaryOp()) {
        for (auto *Op : CI->operand_values())
          Worklist.push(Op);
        continue;
      } else if (auto *SV = dyn_cast<ShuffleVectorInst>(CI)) {
        if (Shuffle && Shuffle != SV)
          return false;
        Shuffle = SV;
        continue;
      }
    }

    // Anything else is currently an unknown node.
    return false;
  }

  if (!Shuffle)
    return false;

  // Check that all uses of the binary ops and shuffles are also included in
  // the lane-invariant operations (Visited should be the list of lanewise
  // instructions, including the shuffle that we found).
  for (auto *V : Visited)
    for (auto *U : V->users())
      if (!Visited.contains(U) && U != &I)
        return false;

  FixedVectorType *VecType =
      dyn_cast<FixedVectorType>(II->getOperand(0)->getType());
  if (!VecType)
    return false;
  FixedVectorType *ShuffleInputType =
      dyn_cast<FixedVectorType>(Shuffle->getOperand(0)->getType());
  if (!ShuffleInputType)
    return false;
  int NumInputElts = ShuffleInputType->getNumElements();

  // Find the mask from sorting the lanes into order. This is most likely to
  // become an identity or concat mask. Undef elements are pushed to the end.
1203   SmallVector<int> ConcatMask;
1204   Shuffle->getShuffleMask(ConcatMask);
1205   sort(ConcatMask, [](int X, int Y) { return (unsigned)X < (unsigned)Y; });
1206   bool UsesSecondVec =
1207       any_of(ConcatMask, [&](int M) { return M >= NumInputElts; });
1208   InstructionCost OldCost = TTI.getShuffleCost(
1209       UsesSecondVec ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc, VecType,
1210       Shuffle->getShuffleMask());
1211   InstructionCost NewCost = TTI.getShuffleCost(
1212       UsesSecondVec ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc, VecType,
1213       ConcatMask);
1214 
1215   LLVM_DEBUG(dbgs() << "Found a reduction feeding from a shuffle: " << *Shuffle
1216                     << "\n");
1217   LLVM_DEBUG(dbgs() << "  OldCost: " << OldCost << " vs NewCost: " << NewCost
1218                     << "\n");
1219   if (NewCost < OldCost) {
1220     Builder.SetInsertPoint(Shuffle);
1221     Value *NewShuffle = Builder.CreateShuffleVector(
1222         Shuffle->getOperand(0), Shuffle->getOperand(1), ConcatMask);
1223     LLVM_DEBUG(dbgs() << "Created new shuffle: " << *NewShuffle << "\n");
1224     replaceValue(*Shuffle, *NewShuffle);
1225   }
1226 
1227   // See if we can re-use foldSelectShuffle, getting it to reduce the size of
1228   // the shuffle into a nicer order, as it can ignore the order of the shuffles.
1229   return foldSelectShuffle(*Shuffle, true);
1230 }
1231 
1232 /// This method looks for groups of shuffles acting on binops, of the form:
1233 ///  %x = shuffle ...
1234 ///  %y = shuffle ...
1235 ///  %a = binop %x, %y
1236 ///  %b = binop %x, %y
1237 ///  shuffle %a, %b, selectmask
1238 /// We may, especially if the shuffle is wider than legal, be able to convert
1239 /// the shuffle to a form where only parts of a and b need to be computed. On
1240 /// architectures with no obvious "select" shuffle, this can reduce the total
1241 /// number of operations if the target reports them as cheaper.
bool VectorCombine::foldSelectShuffle(Instruction &I, bool FromReduction) {
  auto *SVI = dyn_cast<ShuffleVectorInst>(&I);
  auto *VT = dyn_cast<FixedVectorType>(I.getType());
  if (!SVI || !VT)
    return false;
  auto *Op0 = dyn_cast<Instruction>(SVI->getOperand(0));
  auto *Op1 = dyn_cast<Instruction>(SVI->getOperand(1));
  if (!Op0 || !Op1 || Op0 == Op1 || !Op0->isBinaryOp() || !Op1->isBinaryOp() ||
      VT != Op0->getType())
    return false;
  auto *SVI0A = dyn_cast<Instruction>(Op0->getOperand(0));
  auto *SVI0B = dyn_cast<Instruction>(Op0->getOperand(1));
  auto *SVI1A = dyn_cast<Instruction>(Op1->getOperand(0));
  auto *SVI1B = dyn_cast<Instruction>(Op1->getOperand(1));
  SmallPtrSet<Instruction *, 4> InputShuffles({SVI0A, SVI0B, SVI1A, SVI1B});
  auto checkSVNonOpUses = [&](Instruction *I) {
    if (!I || I->getOperand(0)->getType() != VT)
      return true;
    return any_of(I->users(), [&](User *U) {
      return U != Op0 && U != Op1 &&
             !(isa<ShuffleVectorInst>(U) &&
               (InputShuffles.contains(cast<Instruction>(U)) ||
                isInstructionTriviallyDead(cast<Instruction>(U))));
    });
  };
  if (checkSVNonOpUses(SVI0A) || checkSVNonOpUses(SVI0B) ||
      checkSVNonOpUses(SVI1A) || checkSVNonOpUses(SVI1B))
    return false;

  // Collect all the uses that are shuffles that we can transform together. We
  // may not have a single shuffle, but a group that can all be transformed
  // together profitably.
  SmallVector<ShuffleVectorInst *> Shuffles;
  auto collectShuffles = [&](Instruction *I) {
    for (auto *U : I->users()) {
      auto *SV = dyn_cast<ShuffleVectorInst>(U);
      if (!SV || SV->getType() != VT)
        return false;
      if ((SV->getOperand(0) != Op0 && SV->getOperand(0) != Op1) ||
          (SV->getOperand(1) != Op0 && SV->getOperand(1) != Op1))
        return false;
      if (!llvm::is_contained(Shuffles, SV))
        Shuffles.push_back(SV);
    }
    return true;
  };
  if (!collectShuffles(Op0) || !collectShuffles(Op1))
    return false;
  // From a reduction, we need to be processing a single shuffle, otherwise the
  // other uses will not be lane-invariant.
  if (FromReduction && Shuffles.size() > 1)
    return false;

  // Add any shuffle uses for the shuffles we have found, to include them in our
  // cost calculations.
  if (!FromReduction) {
    // Iterate by index: we may append to Shuffles below, and growing the
    // vector would invalidate the iterators of a range-based loop.
    for (unsigned S = 0, E = Shuffles.size(); S != E; S++) {
      for (auto *U : Shuffles[S]->users()) {
        auto *SSV = dyn_cast<ShuffleVectorInst>(U);
        if (SSV && isa<UndefValue>(SSV->getOperand(1)))
          Shuffles.push_back(SSV);
      }
    }
  }

  // For each of the output shuffles, we try to sort all the first vector
  // elements to the beginning, followed by the second vector elements at the
  // end. If the binops are legalized to smaller vectors, this may reduce the
  // total number of binops. We compute the ReconstructMask mask needed to
  // convert back to the original lane order.
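  // For example (illustrative), with NumElts = 4 and the output mask
  // <1,5,0,4>: Op0 lanes {1,0} pack into V1, Op1 lanes {1,0} pack into V2,
  // and the reconstruction mask becomes <0,4,1,5>.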
  SmallVector<std::pair<int, int>> V1, V2;
  SmallVector<SmallVector<int>> OrigReconstructMasks;
  int MaxV1Elt = 0, MaxV2Elt = 0;
  unsigned NumElts = VT->getNumElements();
  for (ShuffleVectorInst *SVN : Shuffles) {
    SmallVector<int> Mask;
    SVN->getShuffleMask(Mask);

    // Check the operands are the same as the original, or reversed (in which
    // case we need to commute the mask).
    Value *SVOp0 = SVN->getOperand(0);
    Value *SVOp1 = SVN->getOperand(1);
    if (isa<UndefValue>(SVOp1)) {
      auto *SSV = cast<ShuffleVectorInst>(SVOp0);
      SVOp0 = SSV->getOperand(0);
      SVOp1 = SSV->getOperand(1);
      for (unsigned I = 0, E = Mask.size(); I != E; I++) {
        if (Mask[I] >= static_cast<int>(SSV->getShuffleMask().size()))
          return false;
        Mask[I] = Mask[I] < 0 ? Mask[I] : SSV->getMaskValue(Mask[I]);
      }
    }
    if (SVOp0 == Op1 && SVOp1 == Op0) {
      std::swap(SVOp0, SVOp1);
      ShuffleVectorInst::commuteShuffleMask(Mask, NumElts);
    }
    if (SVOp0 != Op0 || SVOp1 != Op1)
      return false;

    // Calculate the reconstruction mask for this shuffle, as the mask needed
    // to take the packed values from Op0/Op1 and reconstruct the original
    // lane order.
    SmallVector<int> ReconstructMask;
    for (unsigned I = 0; I < Mask.size(); I++) {
      if (Mask[I] < 0) {
        ReconstructMask.push_back(-1);
      } else if (Mask[I] < static_cast<int>(NumElts)) {
        MaxV1Elt = std::max(MaxV1Elt, Mask[I]);
        auto It = find_if(V1, [&](const std::pair<int, int> &A) {
          return Mask[I] == A.first;
        });
        if (It != V1.end())
          ReconstructMask.push_back(It - V1.begin());
        else {
          ReconstructMask.push_back(V1.size());
          V1.emplace_back(Mask[I], V1.size());
        }
      } else {
        MaxV2Elt = std::max<int>(MaxV2Elt, Mask[I] - NumElts);
        auto It = find_if(V2, [&](const std::pair<int, int> &A) {
          return Mask[I] - static_cast<int>(NumElts) == A.first;
        });
        if (It != V2.end())
          ReconstructMask.push_back(NumElts + It - V2.begin());
        else {
          ReconstructMask.push_back(NumElts + V2.size());
          V2.emplace_back(Mask[I] - NumElts, NumElts + V2.size());
        }
      }
    }

    // For reductions, we know that the output lane ordering doesn't alter the
    // result. Sorting in-order can help simplify the shuffle away.
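    // For example, <2,0,3,1> sorts to the identity mask <0,1,2,3>, which can
    // fold away entirely.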
    if (FromReduction)
      sort(ReconstructMask);
    OrigReconstructMasks.push_back(std::move(ReconstructMask));
  }

  // If the maximum elements used from V1 and V2 are not larger than the new
  // vectors, the vectors are already packed and performing the optimization
  // again will likely not help any further. This also prevents us from
  // getting stuck in a cycle in case the costs do not also rule it out.
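  // For example, if V1 holds two elements and the highest Op0 lane referenced
  // is 1, then lanes {0,1} are already packed at the front of Op0.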
  if (V1.empty() || V2.empty() ||
      (MaxV1Elt == static_cast<int>(V1.size()) - 1 &&
       MaxV2Elt == static_cast<int>(V2.size()) - 1))
    return false;

  // GetBaseMaskValue takes one of the inputs, which may either be a shuffle,
  // a shuffle of another shuffle, or not a shuffle (which is treated as an
  // identity shuffle).
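  // For example (illustrative), if I is a single-source shuffle with mask
  // <2,0> of an input shuffle SSV with mask <1,3,0,2>, then M = 0 maps to
  // lane 2 of SSV and from there to lane 0 of the base operand.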
  auto GetBaseMaskValue = [&](Instruction *I, int M) {
    auto *SV = dyn_cast<ShuffleVectorInst>(I);
    if (!SV)
      return M;
    if (isa<UndefValue>(SV->getOperand(1)))
      if (auto *SSV = dyn_cast<ShuffleVectorInst>(SV->getOperand(0)))
        if (InputShuffles.contains(SSV))
          return SSV->getMaskValue(SV->getMaskValue(M));
    return SV->getMaskValue(M);
  };

  // Attempt to sort the inputs by ascending mask values to make simpler input
  // shuffles and push complex shuffles down to the uses. We sort on the first
  // of the two input shuffle orders, to try and get at least one input into a
  // nice order.
  auto SortBase = [&](Instruction *A, std::pair<int, int> X,
                      std::pair<int, int> Y) {
    int MXA = GetBaseMaskValue(A, X.first);
    int MYA = GetBaseMaskValue(A, Y.first);
    return MXA < MYA;
  };
  stable_sort(V1, [&](std::pair<int, int> A, std::pair<int, int> B) {
    return SortBase(SVI0A, A, B);
  });
  stable_sort(V2, [&](std::pair<int, int> A, std::pair<int, int> B) {
    return SortBase(SVI1A, A, B);
  });
  // Calculate our ReconstructMasks from the OrigReconstructMasks and the
  // modified order of the input shuffles.
  SmallVector<SmallVector<int>> ReconstructMasks;
  for (const auto &Mask : OrigReconstructMasks) {
    SmallVector<int> ReconstructMask;
    for (int M : Mask) {
      auto FindIndex = [](const SmallVector<std::pair<int, int>> &V, int M) {
        auto It = find_if(V, [M](auto A) { return A.second == M; });
        assert(It != V.end() && "Expected all entries in Mask");
        return std::distance(V.begin(), It);
      };
      if (M < 0)
        ReconstructMask.push_back(-1);
      else if (M < static_cast<int>(NumElts))
        ReconstructMask.push_back(FindIndex(V1, M));
      else
        ReconstructMask.push_back(NumElts + FindIndex(V2, M));
    }
    ReconstructMasks.push_back(std::move(ReconstructMask));
  }

  // Calculate the masks needed for the new input shuffles, which get padded
  // with undef up to the full vector width.
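  // For example (illustrative), with NumElts = 4 and V1.size() == 2, V1A and
  // V1B each take the form <m0, m1, -1, -1>.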
  SmallVector<int> V1A, V1B, V2A, V2B;
  for (unsigned I = 0; I < V1.size(); I++) {
    V1A.push_back(GetBaseMaskValue(SVI0A, V1[I].first));
    V1B.push_back(GetBaseMaskValue(SVI0B, V1[I].first));
  }
  for (unsigned I = 0; I < V2.size(); I++) {
    V2A.push_back(GetBaseMaskValue(SVI1A, V2[I].first));
    V2B.push_back(GetBaseMaskValue(SVI1B, V2[I].first));
  }
  while (V1A.size() < NumElts) {
    V1A.push_back(UndefMaskElem);
    V1B.push_back(UndefMaskElem);
  }
  while (V2A.size() < NumElts) {
    V2A.push_back(UndefMaskElem);
    V2B.push_back(UndefMaskElem);
  }

  auto AddShuffleCost = [&](InstructionCost C, Instruction *I) {
    auto *SV = dyn_cast<ShuffleVectorInst>(I);
    if (!SV)
      return C;
    return C + TTI.getShuffleCost(isa<UndefValue>(SV->getOperand(1))
                                      ? TTI::SK_PermuteSingleSrc
                                      : TTI::SK_PermuteTwoSrc,
                                  VT, SV->getShuffleMask());
  };
  auto AddShuffleMaskCost = [&](InstructionCost C, ArrayRef<int> Mask) {
    return C + TTI.getShuffleCost(TTI::SK_PermuteTwoSrc, VT, Mask);
  };

  // Get the costs of the shuffles + binops before and after with the new
  // shuffle masks.
  InstructionCost CostBefore =
      TTI.getArithmeticInstrCost(Op0->getOpcode(), VT) +
      TTI.getArithmeticInstrCost(Op1->getOpcode(), VT);
  CostBefore += std::accumulate(Shuffles.begin(), Shuffles.end(),
                                InstructionCost(0), AddShuffleCost);
  CostBefore += std::accumulate(InputShuffles.begin(), InputShuffles.end(),
                                InstructionCost(0), AddShuffleCost);

  // The new binops will be unused for lanes past the used shuffle lengths.
  // These narrower types attempt to get the correct cost for that from the
  // target.
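  // For example (illustrative), if only 3 lanes of an 8-lane Op0 are needed,
  // the new binop is costed as an operation on a <3 x ty> vector.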
  FixedVectorType *Op0SmallVT =
      FixedVectorType::get(VT->getScalarType(), V1.size());
  FixedVectorType *Op1SmallVT =
      FixedVectorType::get(VT->getScalarType(), V2.size());
  InstructionCost CostAfter =
      TTI.getArithmeticInstrCost(Op0->getOpcode(), Op0SmallVT) +
      TTI.getArithmeticInstrCost(Op1->getOpcode(), Op1SmallVT);
  CostAfter += std::accumulate(ReconstructMasks.begin(), ReconstructMasks.end(),
                               InstructionCost(0), AddShuffleMaskCost);
  std::set<SmallVector<int>> OutputShuffleMasks({V1A, V1B, V2A, V2B});
  CostAfter +=
      std::accumulate(OutputShuffleMasks.begin(), OutputShuffleMasks.end(),
                      InstructionCost(0), AddShuffleMaskCost);

  LLVM_DEBUG(dbgs() << "Found a binop select shuffle pattern: " << I << "\n");
  LLVM_DEBUG(dbgs() << "  CostBefore: " << CostBefore
                    << " vs CostAfter: " << CostAfter << "\n");
  if (CostBefore <= CostAfter)
    return false;

  // The cost model has passed, create the new instructions.
  auto GetShuffleOperand = [&](Instruction *I, unsigned Op) -> Value * {
    auto *SV = dyn_cast<ShuffleVectorInst>(I);
    if (!SV)
      return I;
    if (isa<UndefValue>(SV->getOperand(1)))
      if (auto *SSV = dyn_cast<ShuffleVectorInst>(SV->getOperand(0)))
        if (InputShuffles.contains(SSV))
          return SSV->getOperand(Op);
    return SV->getOperand(Op);
  };
  Builder.SetInsertPoint(SVI0A->getNextNode());
  Value *NSV0A = Builder.CreateShuffleVector(GetShuffleOperand(SVI0A, 0),
                                             GetShuffleOperand(SVI0A, 1), V1A);
  Builder.SetInsertPoint(SVI0B->getNextNode());
  Value *NSV0B = Builder.CreateShuffleVector(GetShuffleOperand(SVI0B, 0),
                                             GetShuffleOperand(SVI0B, 1), V1B);
  Builder.SetInsertPoint(SVI1A->getNextNode());
  Value *NSV1A = Builder.CreateShuffleVector(GetShuffleOperand(SVI1A, 0),
                                             GetShuffleOperand(SVI1A, 1), V2A);
  Builder.SetInsertPoint(SVI1B->getNextNode());
  Value *NSV1B = Builder.CreateShuffleVector(GetShuffleOperand(SVI1B, 0),
                                             GetShuffleOperand(SVI1B, 1), V2B);
  Builder.SetInsertPoint(Op0);
  Value *NOp0 = Builder.CreateBinOp((Instruction::BinaryOps)Op0->getOpcode(),
                                    NSV0A, NSV0B);
  if (auto *I = dyn_cast<Instruction>(NOp0))
    I->copyIRFlags(Op0, true);
  Builder.SetInsertPoint(Op1);
  Value *NOp1 = Builder.CreateBinOp((Instruction::BinaryOps)Op1->getOpcode(),
                                    NSV1A, NSV1B);
  if (auto *I = dyn_cast<Instruction>(NOp1))
    I->copyIRFlags(Op1, true);

  for (int S = 0, E = ReconstructMasks.size(); S != E; S++) {
    Builder.SetInsertPoint(Shuffles[S]);
    Value *NSV = Builder.CreateShuffleVector(NOp0, NOp1, ReconstructMasks[S]);
    replaceValue(*Shuffles[S], *NSV);
  }

  Worklist.pushValue(NSV0A);
  Worklist.pushValue(NSV0B);
  Worklist.pushValue(NSV1A);
  Worklist.pushValue(NSV1B);
  for (auto *S : Shuffles)
    Worklist.add(S);
  return true;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
bool VectorCombine::run() {
  if (DisableVectorCombine)
    return false;

  // Don't attempt vectorization if the target does not support vectors.
  if (!TTI.getNumberOfRegisters(TTI.getRegisterClassForType(/*Vector*/ true)))
    return false;

  bool MadeChange = false;
  auto FoldInst = [this, &MadeChange](Instruction &I) {
    Builder.SetInsertPoint(&I);
    if (!ScalarizationOnly) {
      MadeChange |= vectorizeLoadInsert(I);
      MadeChange |= foldExtractExtract(I);
      MadeChange |= foldBitcastShuf(I);
      MadeChange |= foldExtractedCmps(I);
      MadeChange |= foldShuffleOfBinops(I);
      MadeChange |= foldShuffleFromReductions(I);
      MadeChange |= foldSelectShuffle(I);
    }
    MadeChange |= scalarizeBinopOrCmp(I);
    MadeChange |= scalarizeLoadExtract(I);
    MadeChange |= foldSingleElementStore(I);
  };
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Use early increment range so that we can erase instructions in the loop.
    for (Instruction &I : make_early_inc_range(BB)) {
      if (I.isDebugOrPseudoInst())
        continue;
      FoldInst(I);
    }
  }

  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.removeOne();
    if (!I)
      continue;

    if (isInstructionTriviallyDead(I)) {
      eraseInstruction(*I);
      continue;
    }

    FoldInst(*I);
  }

  return MadeChange;
}

// Pass manager boilerplate below here.

namespace {
class VectorCombineLegacyPass : public FunctionPass {
public:
  static char ID;
  VectorCombineLegacyPass() : FunctionPass(ID) {
    initializeVectorCombineLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.setPreservesCFG();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    FunctionPass::getAnalysisUsage(AU);
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    VectorCombine Combiner(F, TTI, DT, AA, AC, false);
    return Combiner.run();
  }
};
} // namespace

char VectorCombineLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(VectorCombineLegacyPass, "vector-combine",
                      "Optimize scalar/vector ops", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(VectorCombineLegacyPass, "vector-combine",
                    "Optimize scalar/vector ops", false, false)

Pass *llvm::createVectorCombinePass() {
  return new VectorCombineLegacyPass();
}

PreservedAnalyses VectorCombinePass::run(Function &F,
                                         FunctionAnalysisManager &FAM) {
  auto &AC = FAM.getResult<AssumptionAnalysis>(F);
  TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
  DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  AAResults &AA = FAM.getResult<AAManager>(F);
  VectorCombine Combiner(F, TTI, DT, AA, AC, ScalarizationOnly);
  if (!Combiner.run())
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}