//===------- VectorCombine.cpp - Optimize partial vector operations -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass optimizes scalar/vector interactions using target cost models. The
// transforms implemented here may not fit in traditional loop-based or SLP
// vectorization passes.
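//
// For example, a scalar load that only feeds an insertelement, such as:
//   %s = load float, float* %p
//   %v = insertelement <4 x float> undef, float %s, i32 0
// may be widened into a single vector load (plus a shuffle to adjust the
// element position or vector length) when the target cost model says the
// vector form is at least as cheap. See vectorizeLoadInsert() below.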
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/VectorCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"

#define DEBUG_TYPE "vector-combine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumVecLoad, "Number of vector loads formed");
STATISTIC(NumVecCmp, "Number of vector compares formed");
STATISTIC(NumVecBO, "Number of vector binops formed");
STATISTIC(NumVecCmpBO, "Number of vector compare + binop formed");
STATISTIC(NumShufOfBitcast, "Number of shuffles moved after bitcast");
STATISTIC(NumScalarBO, "Number of scalar binops formed");
STATISTIC(NumScalarCmp, "Number of scalar compares formed");

static cl::opt<bool> DisableVectorCombine(
    "disable-vector-combine", cl::init(false), cl::Hidden,
    cl::desc("Disable all vector combine transforms"));

static cl::opt<bool> DisableBinopExtractShuffle(
    "disable-binop-extract-shuffle", cl::init(false), cl::Hidden,
    cl::desc("Disable binop extract to shuffle transforms"));

static cl::opt<unsigned> MaxInstrsToScan(
    "vector-combine-max-scan-instrs", cl::init(30), cl::Hidden,
    cl::desc("Max number of instructions to scan for vector combining."));

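// Sentinel value meaning "no known or preferred vector element index".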
static const unsigned InvalidIndex = std::numeric_limits<unsigned>::max();

namespace {
class VectorCombine {
public:
  VectorCombine(Function &F, const TargetTransformInfo &TTI,
                const DominatorTree &DT, AAResults &AA, AssumptionCache &AC,
                bool ScalarizationOnly)
      : F(F), Builder(F.getContext()), TTI(TTI), DT(DT), AA(AA), AC(AC),
        ScalarizationOnly(ScalarizationOnly) {}

  bool run();

private:
  Function &F;
  IRBuilder<> Builder;
  const TargetTransformInfo &TTI;
  const DominatorTree &DT;
  AAResults &AA;
  AssumptionCache &AC;

  /// If true, only perform scalarization combines and do not introduce new
  /// vector operations.
  bool ScalarizationOnly;

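  /// Instructions queued for revisiting after a fold changes their operands
  /// or users.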
  InstructionWorklist Worklist;

  bool vectorizeLoadInsert(Instruction &I);
  ExtractElementInst *getShuffleExtract(ExtractElementInst *Ext0,
                                        ExtractElementInst *Ext1,
                                        unsigned PreferredExtractIndex) const;
  bool isExtractExtractCheap(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                             const Instruction &I,
                             ExtractElementInst *&ConvertToShuffle,
                             unsigned PreferredExtractIndex);
  void foldExtExtCmp(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                     Instruction &I);
  void foldExtExtBinop(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                       Instruction &I);
  bool foldExtractExtract(Instruction &I);
  bool foldBitcastShuf(Instruction &I);
  bool scalarizeBinopOrCmp(Instruction &I);
  bool foldExtractedCmps(Instruction &I);
  bool foldSingleElementStore(Instruction &I);
  bool scalarizeLoadExtract(Instruction &I);

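  /// Replace all uses of \p Old with \p New and queue the affected
  /// instructions for another pass over the worklist in run().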
  void replaceValue(Value &Old, Value &New) {
    Old.replaceAllUsesWith(&New);
    New.takeName(&Old);
    if (auto *NewI = dyn_cast<Instruction>(&New)) {
      Worklist.pushUsersToWorkList(*NewI);
      Worklist.pushValue(NewI);
    }
    Worklist.pushValue(&Old);
  }

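  /// Erase \p I after queueing its operands, which may now be dead or
  /// foldable, for revisiting.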
  void eraseInstruction(Instruction &I) {
    for (Value *Op : I.operands())
      Worklist.pushValue(Op);
    Worklist.remove(&I);
    I.eraseFromParent();
  }
};
} // namespace

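/// Try to widen "insertelement undef, (load scalar), 0" into a single vector
/// load (possibly plus a shuffle), guarded by the target cost model.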
bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
  // Match insert into fixed vector of scalar value.
  // TODO: Handle non-zero insert index.
  auto *Ty = dyn_cast<FixedVectorType>(I.getType());
  Value *Scalar;
  if (!Ty || !match(&I, m_InsertElt(m_Undef(), m_Value(Scalar), m_ZeroInt())) ||
      !Scalar->hasOneUse())
    return false;

  // Optionally match an extract from another vector.
  Value *X;
  bool HasExtract = match(Scalar, m_ExtractElt(m_Value(X), m_ZeroInt()));
  if (!HasExtract)
    X = Scalar;

  // Match source value as load of scalar or vector.
  // Do not vectorize scalar load (widening) if atomic/volatile or under
  // asan/hwasan/memtag/tsan. The widened load may load data from dirty regions
  // or create data races non-existent in the source.
  auto *Load = dyn_cast<LoadInst>(X);
  if (!Load || !Load->isSimple() || !Load->hasOneUse() ||
      Load->getFunction()->hasFnAttribute(Attribute::SanitizeMemTag) ||
      mustSuppressSpeculation(*Load))
    return false;

  const DataLayout &DL = I.getModule()->getDataLayout();
  Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
  assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");

  // If original AS != Load's AS, we can't bitcast the original pointer and have
  // to use Load's operand instead. Ideally we would want to strip pointer casts
  // without changing AS, but there's no API to do that ATM.
  unsigned AS = Load->getPointerAddressSpace();
  if (AS != SrcPtr->getType()->getPointerAddressSpace())
    SrcPtr = Load->getPointerOperand();

  // We are potentially transforming byte-sized (8-bit) memory accesses, so make
  // sure we have all of our type-based constraints in place for this target.
  Type *ScalarTy = Scalar->getType();
  uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits();
  unsigned MinVectorSize = TTI.getMinVectorRegisterBitWidth();
  if (!ScalarSize || !MinVectorSize || MinVectorSize % ScalarSize != 0 ||
      ScalarSize % 8 != 0)
    return false;

  // Check safety of replacing the scalar load with a larger vector load.
  // We use minimal alignment (maximum flexibility) because we only care about
  // the dereferenceable region. When calculating cost and creating a new op,
  // we may use a larger value based on alignment attributes.
  unsigned MinVecNumElts = MinVectorSize / ScalarSize;
  auto *MinVecTy = VectorType::get(ScalarTy, MinVecNumElts, false);
  unsigned OffsetEltIndex = 0;
  Align Alignment = Load->getAlign();
  if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT)) {
    // It is not safe to load directly from the pointer, but we can still peek
    // through gep offsets and check if it is safe to load from a base address
    // with updated alignment. If it is, we can shuffle the element(s) into
    // place after loading.
    unsigned OffsetBitWidth = DL.getIndexTypeSizeInBits(SrcPtr->getType());
    APInt Offset(OffsetBitWidth, 0);
    SrcPtr = SrcPtr->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

    // We want to shuffle the result down from a high element of a vector, so
    // the offset must be positive.
    if (Offset.isNegative())
      return false;

    // The offset must be a multiple of the scalar element to shuffle cleanly
    // in the element's size.
    uint64_t ScalarSizeInBytes = ScalarSize / 8;
    if (Offset.urem(ScalarSizeInBytes) != 0)
      return false;

    // If we load MinVecNumElts, will our target element still be loaded?
    OffsetEltIndex = Offset.udiv(ScalarSizeInBytes).getZExtValue();
    if (OffsetEltIndex >= MinVecNumElts)
      return false;

    if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT))
      return false;

    // Update alignment with offset value. Note that the offset could be negated
    // to more accurately represent "(new) SrcPtr - Offset = (old) SrcPtr", but
    // negation does not change the result of the alignment calculation.
    Alignment = commonAlignment(Alignment, Offset.getZExtValue());
  }

  // Original pattern: insertelt undef, load [free casts of] PtrOp, 0
  // Use the greater of the alignment on the load or its source pointer.
  Alignment = std::max(SrcPtr->getPointerAlignment(DL), Alignment);
  Type *LoadTy = Load->getType();
  InstructionCost OldCost =
      TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS);
  APInt DemandedElts = APInt::getOneBitSet(MinVecNumElts, 0);
  OldCost += TTI.getScalarizationOverhead(MinVecTy, DemandedElts,
                                          /* Insert */ true, HasExtract);

  // New pattern: load VecPtr
  InstructionCost NewCost =
      TTI.getMemoryOpCost(Instruction::Load, MinVecTy, Alignment, AS);
  // Optionally, we are shuffling the loaded vector element(s) into place.
  // For the mask, set everything but element 0 to undef to prevent poison from
  // propagating from the extra loaded memory. This will also optionally
  // shrink/grow the vector from the loaded size to the output size.
  // We assume this operation has no cost in codegen if there was no offset.
  // Note that we could use freeze to avoid poison problems, but then we might
  // still need a shuffle to change the vector size.
  unsigned OutputNumElts = Ty->getNumElements();
  SmallVector<int, 16> Mask(OutputNumElts, UndefMaskElem);
  assert(OffsetEltIndex < MinVecNumElts && "Address offset too big");
  Mask[0] = OffsetEltIndex;
  if (OffsetEltIndex)
    NewCost += TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, MinVecTy, Mask);

  // We can aggressively convert to the vector form because the backend can
  // invert this transform if it does not result in a performance win.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // It is safe and potentially profitable to load a vector directly:
  // inselt undef, load Scalar, 0 --> load VecPtr
  IRBuilder<> Builder(Load);
  Value *CastedPtr = Builder.CreateBitCast(SrcPtr, MinVecTy->getPointerTo(AS));
  Value *VecLd = Builder.CreateAlignedLoad(MinVecTy, CastedPtr, Alignment);
  VecLd = Builder.CreateShuffleVector(VecLd, Mask);

  replaceValue(I, *VecLd);
  ++NumVecLoad;
  return true;
}

/// Determine which, if any, of the inputs should be replaced by a shuffle
/// followed by extract from a different index.
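/// Returns nullptr if no shuffle is needed (e.g., the extract indexes match).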
ExtractElementInst *VectorCombine::getShuffleExtract(
    ExtractElementInst *Ext0, ExtractElementInst *Ext1,
    unsigned PreferredExtractIndex = InvalidIndex) const {
  assert(isa<ConstantInt>(Ext0->getIndexOperand()) &&
         isa<ConstantInt>(Ext1->getIndexOperand()) &&
         "Expected constant extract indexes");

  unsigned Index0 = cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue();
  unsigned Index1 = cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue();

  // If the extract indexes are identical, no shuffle is needed.
  if (Index0 == Index1)
    return nullptr;

  Type *VecTy = Ext0->getVectorOperand()->getType();
  assert(VecTy == Ext1->getVectorOperand()->getType() && "Need matching types");
  InstructionCost Cost0 =
      TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
  InstructionCost Cost1 =
      TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);

  // If both costs are invalid, no shuffle is needed.
  if (!Cost0.isValid() && !Cost1.isValid())
    return nullptr;

  // We are extracting from 2 different indexes, so one operand must be shuffled
  // before performing a vector operation and/or extract. The more expensive
  // extract will be replaced by a shuffle.
  if (Cost0 > Cost1)
    return Ext0;
  if (Cost1 > Cost0)
    return Ext1;

  // If the costs are equal and there is a preferred extract index, shuffle the
  // opposite operand.
  if (PreferredExtractIndex == Index0)
    return Ext1;
  if (PreferredExtractIndex == Index1)
    return Ext0;

  // Otherwise, replace the extract with the higher index.
  return Index0 > Index1 ? Ext0 : Ext1;
}

/// Compare the relative costs of 2 extracts followed by scalar operation vs.
/// vector operation(s) followed by extract. Return true if the existing
/// instructions are cheaper than a vector alternative. Otherwise, return false
/// and if one of the extracts should be transformed to a shufflevector, set
/// \p ConvertToShuffle to that extract instruction.
bool VectorCombine::isExtractExtractCheap(ExtractElementInst *Ext0,
                                          ExtractElementInst *Ext1,
                                          const Instruction &I,
                                          ExtractElementInst *&ConvertToShuffle,
                                          unsigned PreferredExtractIndex) {
  assert(isa<ConstantInt>(Ext0->getOperand(1)) &&
         isa<ConstantInt>(Ext1->getOperand(1)) &&
         "Expected constant extract indexes");
  unsigned Opcode = I.getOpcode();
  Type *ScalarTy = Ext0->getType();
  auto *VecTy = cast<VectorType>(Ext0->getOperand(0)->getType());
  InstructionCost ScalarOpCost, VectorOpCost;

  // Get cost estimates for scalar and vector versions of the operation.
  bool IsBinOp = Instruction::isBinaryOp(Opcode);
  if (IsBinOp) {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  } else {
    assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
           "Expected a compare");
    CmpInst::Predicate Pred = cast<CmpInst>(I).getPredicate();
    ScalarOpCost = TTI.getCmpSelInstrCost(
        Opcode, ScalarTy, CmpInst::makeCmpResultType(ScalarTy), Pred);
    VectorOpCost = TTI.getCmpSelInstrCost(
        Opcode, VecTy, CmpInst::makeCmpResultType(VecTy), Pred);
  }

  // Get cost estimates for the extract elements. These costs will factor into
  // both sequences.
  unsigned Ext0Index = cast<ConstantInt>(Ext0->getOperand(1))->getZExtValue();
  unsigned Ext1Index = cast<ConstantInt>(Ext1->getOperand(1))->getZExtValue();

  InstructionCost Extract0Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext0Index);
  InstructionCost Extract1Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext1Index);

  // A more expensive extract will always be replaced by a splat shuffle.
  // For example, if Ext0 is more expensive:
  // opcode (extelt V0, Ext0), (ext V1, Ext1) -->
  // extelt (opcode (splat V0, Ext0), V1), Ext1
  // TODO: Evaluate whether that always results in lowest cost. Alternatively,
  //       check the cost of creating a broadcast shuffle and shuffling both
  //       operands to element 0.
  InstructionCost CheapExtractCost = std::min(Extract0Cost, Extract1Cost);

  // Extra uses of the extracts mean that we include those costs in the
  // vector total because those instructions will not be eliminated.
  InstructionCost OldCost, NewCost;
  if (Ext0->getOperand(0) == Ext1->getOperand(0) && Ext0Index == Ext1Index) {
    // Handle a special case. If the 2 extracts are identical, adjust the
    // formulas to account for that. The extra use charge allows for either the
    // CSE'd pattern or an unoptimized form with identical values:
    // opcode (extelt V, C), (extelt V, C) --> extelt (opcode V, V), C
    bool HasUseTax = Ext0 == Ext1 ? !Ext0->hasNUses(2)
                                  : !Ext0->hasOneUse() || !Ext1->hasOneUse();
    OldCost = CheapExtractCost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost + HasUseTax * CheapExtractCost;
  } else {
    // Handle the general case. Each extract is actually a different value:
    // opcode (extelt V0, C0), (extelt V1, C1) --> extelt (opcode V0, V1), C
    OldCost = Extract0Cost + Extract1Cost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost +
              !Ext0->hasOneUse() * Extract0Cost +
              !Ext1->hasOneUse() * Extract1Cost;
  }

  ConvertToShuffle = getShuffleExtract(Ext0, Ext1, PreferredExtractIndex);
  if (ConvertToShuffle) {
    if (IsBinOp && DisableBinopExtractShuffle)
      return true;

    // If we are extracting from 2 different indexes, then one operand must be
    // shuffled before performing the vector operation. The shuffle mask is
    // undefined except for 1 lane that is being translated to the remaining
    // extraction lane. Therefore, it is a splat shuffle. Ex:
    // ShufMask = { undef, undef, 0, undef }
    // TODO: The cost model has an option for a "broadcast" shuffle
    //       (splat-from-element-0), but no option for a more general splat.
    NewCost +=
        TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
  }

  // Aggressively form a vector op if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  return OldCost < NewCost;
}

/// Create a shuffle that translates (shifts) 1 element from the input vector
/// to a new element location.
static Value *createShiftShuffle(Value *Vec, unsigned OldIndex,
                                 unsigned NewIndex, IRBuilder<> &Builder) {
  // The shuffle mask is undefined except for 1 lane that is being translated
  // to the new element index. Example for OldIndex == 2 and NewIndex == 0:
  // ShufMask = { 2, undef, undef, undef }
  auto *VecTy = cast<FixedVectorType>(Vec->getType());
  SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem);
  ShufMask[NewIndex] = OldIndex;
  return Builder.CreateShuffleVector(Vec, ShufMask, "shift");
}

/// Given an extract element instruction with constant index operand, shuffle
/// the source vector (shift the scalar element) to a NewIndex for extraction.
/// Return null if the input can be constant folded, so that we are not creating
/// unnecessary instructions.
static ExtractElementInst *translateExtract(ExtractElementInst *ExtElt,
                                            unsigned NewIndex,
                                            IRBuilder<> &Builder) {
  // If the extract can be constant-folded, this code is unsimplified. Defer
  // to other passes to handle that.
  Value *X = ExtElt->getVectorOperand();
  Value *C = ExtElt->getIndexOperand();
  assert(isa<ConstantInt>(C) && "Expected a constant index operand");
  if (isa<Constant>(X))
    return nullptr;

  Value *Shuf = createShiftShuffle(X, cast<ConstantInt>(C)->getZExtValue(),
                                   NewIndex, Builder);
  return cast<ExtractElementInst>(Builder.CreateExtractElement(Shuf, NewIndex));
}

/// Try to reduce extract element costs by converting scalar compares to vector
/// compares followed by extract.
/// cmp (ext0 V0, C), (ext1 V1, C)
void VectorCombine::foldExtExtCmp(ExtractElementInst *Ext0,
                                  ExtractElementInst *Ext1, Instruction &I) {
  assert(isa<CmpInst>(&I) && "Expected a compare");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // cmp Pred (extelt V0, C), (extelt V1, C) --> extelt (cmp Pred V0, V1), C
  ++NumVecCmp;
  CmpInst::Predicate Pred = cast<CmpInst>(&I)->getPredicate();
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecCmp = Builder.CreateCmp(Pred, V0, V1);
  Value *NewExt = Builder.CreateExtractElement(VecCmp, Ext0->getIndexOperand());
  replaceValue(I, *NewExt);
}

/// Try to reduce extract element costs by converting scalar binops to vector
/// binops followed by extract.
/// bo (ext0 V0, C), (ext1 V1, C)
void VectorCombine::foldExtExtBinop(ExtractElementInst *Ext0,
                                    ExtractElementInst *Ext1, Instruction &I) {
  assert(isa<BinaryOperator>(&I) && "Expected a binary operator");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // bo (extelt V0, C), (extelt V1, C) --> extelt (bo V0, V1), C
  ++NumVecBO;
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecBO =
      Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), V0, V1);

  // All IR flags are safe to back-propagate because any potential poison
  // created in unused vector elements is discarded by the extract.
  if (auto *VecBOInst = dyn_cast<Instruction>(VecBO))
    VecBOInst->copyIRFlags(&I);

  Value *NewExt = Builder.CreateExtractElement(VecBO, Ext0->getIndexOperand());
  replaceValue(I, *NewExt);
}

/// Match an instruction with extracted vector operands.
bool VectorCombine::foldExtractExtract(Instruction &I) {
  // It is not safe to transform things like div, urem, etc. because we may
  // create undefined behavior when executing those on unknown vector elements.
  if (!isSafeToSpeculativelyExecute(&I))
    return false;

  Instruction *I0, *I1;
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!match(&I, m_Cmp(Pred, m_Instruction(I0), m_Instruction(I1))) &&
      !match(&I, m_BinOp(m_Instruction(I0), m_Instruction(I1))))
    return false;

  Value *V0, *V1;
  uint64_t C0, C1;
  if (!match(I0, m_ExtractElt(m_Value(V0), m_ConstantInt(C0))) ||
      !match(I1, m_ExtractElt(m_Value(V1), m_ConstantInt(C1))) ||
      V0->getType() != V1->getType())
    return false;

  // If the scalar value 'I' is going to be re-inserted into a vector, then try
  // to create an extract to that same element. The extract/insert can be
  // reduced to a "select shuffle".
  // TODO: If we add a larger pattern match that starts from an insert, this
  //       probably becomes unnecessary.
  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  uint64_t InsertIndex = InvalidIndex;
  if (I.hasOneUse())
    match(I.user_back(),
          m_InsertElt(m_Value(), m_Value(), m_ConstantInt(InsertIndex)));

  ExtractElementInst *ExtractToChange;
  if (isExtractExtractCheap(Ext0, Ext1, I, ExtractToChange, InsertIndex))
    return false;

  if (ExtractToChange) {
    unsigned CheapExtractIdx = ExtractToChange == Ext0 ? C1 : C0;
    ExtractElementInst *NewExtract =
        translateExtract(ExtractToChange, CheapExtractIdx, Builder);
    if (!NewExtract)
      return false;
    if (ExtractToChange == Ext0)
      Ext0 = NewExtract;
    else
      Ext1 = NewExtract;
  }

  if (Pred != CmpInst::BAD_ICMP_PREDICATE)
    foldExtExtCmp(Ext0, Ext1, I);
  else
    foldExtExtBinop(Ext0, Ext1, I);

  Worklist.push(Ext0);
  Worklist.push(Ext1);
  return true;
}

/// If this is a bitcast of a shuffle, try to bitcast the source vector to the
/// destination type followed by shuffle. This can enable further transforms by
/// moving bitcasts or shuffles together.
bool VectorCombine::foldBitcastShuf(Instruction &I) {
  Value *V;
  ArrayRef<int> Mask;
  if (!match(&I, m_BitCast(
                     m_OneUse(m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))))))
    return false;

  // 1) Do not fold bitcast shuffle for scalable type. First, the shuffle cost
  // for a scalable type is unknown; second, we cannot reason about whether the
  // narrowed shuffle mask for a scalable type is a splat.
  // 2) Disallow non-vector casts and length-changing shuffles.
  // TODO: We could allow any shuffle.
  auto *DestTy = dyn_cast<FixedVectorType>(I.getType());
  auto *SrcTy = dyn_cast<FixedVectorType>(V->getType());
  if (!SrcTy || !DestTy || I.getOperand(0)->getType() != SrcTy)
    return false;

  unsigned DestNumElts = DestTy->getNumElements();
  unsigned SrcNumElts = SrcTy->getNumElements();
  SmallVector<int, 16> NewMask;
  if (SrcNumElts <= DestNumElts) {
    // The bitcast is from wide to narrow/equal elements. The shuffle mask can
    // always be expanded to the equivalent form choosing narrower elements.
    assert(DestNumElts % SrcNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = DestNumElts / SrcNumElts;
    narrowShuffleMaskElts(ScaleFactor, Mask, NewMask);
  } else {
    // The bitcast is from narrow elements to wide elements. The shuffle mask
    // must choose consecutive elements to allow casting first.
    assert(SrcNumElts % DestNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = SrcNumElts / DestNumElts;
    if (!widenShuffleMaskElts(ScaleFactor, Mask, NewMask))
      return false;
  }

  // The new shuffle must not cost more than the old shuffle. The bitcast is
  // moved ahead of the shuffle, so assume that it has the same cost as before.
  InstructionCost DestCost = TTI.getShuffleCost(
      TargetTransformInfo::SK_PermuteSingleSrc, DestTy, NewMask);
  InstructionCost SrcCost =
      TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, SrcTy, Mask);
  if (DestCost > SrcCost || !DestCost.isValid())
    return false;

  // bitcast (shuf V, MaskC) --> shuf (bitcast V), MaskC'
  ++NumShufOfBitcast;
  Value *CastV = Builder.CreateBitCast(V, DestTy);
  Value *Shuf = Builder.CreateShuffleVector(CastV, NewMask);
  replaceValue(I, *Shuf);
  return true;
}

/// Match a vector binop or compare instruction with at least one inserted
/// scalar operand and convert to scalar binop/cmp followed by insertelement.
bool VectorCombine::scalarizeBinopOrCmp(Instruction &I) {
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  Value *Ins0, *Ins1;
  if (!match(&I, m_BinOp(m_Value(Ins0), m_Value(Ins1))) &&
      !match(&I, m_Cmp(Pred, m_Value(Ins0), m_Value(Ins1))))
    return false;

  // Do not convert the vector condition of a vector select into a scalar
  // condition. That may cause problems for codegen because of differences in
  // boolean formats and register-file transfers.
  // TODO: Can we account for that in the cost model?
  bool IsCmp = Pred != CmpInst::Predicate::BAD_ICMP_PREDICATE;
  if (IsCmp)
    for (User *U : I.users())
      if (match(U, m_Select(m_Specific(&I), m_Value(), m_Value())))
        return false;

  // Match against one or both scalar values being inserted into constant
  // vectors:
  // vec_op VecC0, (inselt VecC1, V1, Index)
  // vec_op (inselt VecC0, V0, Index), VecC1
  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index)
  // TODO: Deal with mismatched index constants and variable indexes?
  Constant *VecC0 = nullptr, *VecC1 = nullptr;
  Value *V0 = nullptr, *V1 = nullptr;
  uint64_t Index0 = 0, Index1 = 0;
  if (!match(Ins0, m_InsertElt(m_Constant(VecC0), m_Value(V0),
                               m_ConstantInt(Index0))) &&
      !match(Ins0, m_Constant(VecC0)))
    return false;
  if (!match(Ins1, m_InsertElt(m_Constant(VecC1), m_Value(V1),
                               m_ConstantInt(Index1))) &&
      !match(Ins1, m_Constant(VecC1)))
    return false;

  bool IsConst0 = !V0;
  bool IsConst1 = !V1;
  if (IsConst0 && IsConst1)
    return false;
  if (!IsConst0 && !IsConst1 && Index0 != Index1)
    return false;

  // Bail for single insertion if it is a load.
  // TODO: Handle this once getVectorInstrCost can cost for load/stores.
  auto *I0 = dyn_cast_or_null<Instruction>(V0);
  auto *I1 = dyn_cast_or_null<Instruction>(V1);
  if ((IsConst0 && I1 && I1->mayReadFromMemory()) ||
      (IsConst1 && I0 && I0->mayReadFromMemory()))
    return false;

  uint64_t Index = IsConst0 ? Index1 : Index0;
  Type *ScalarTy = IsConst0 ? V1->getType() : V0->getType();
  Type *VecTy = I.getType();
  assert(VecTy->isVectorTy() &&
         (IsConst0 || IsConst1 || V0->getType() == V1->getType()) &&
         (ScalarTy->isIntegerTy() || ScalarTy->isFloatingPointTy() ||
          ScalarTy->isPointerTy()) &&
         "Unexpected types for insert element into binop or cmp");

  unsigned Opcode = I.getOpcode();
  InstructionCost ScalarOpCost, VectorOpCost;
  if (IsCmp) {
    CmpInst::Predicate Pred = cast<CmpInst>(I).getPredicate();
    ScalarOpCost = TTI.getCmpSelInstrCost(
        Opcode, ScalarTy, CmpInst::makeCmpResultType(ScalarTy), Pred);
    VectorOpCost = TTI.getCmpSelInstrCost(
        Opcode, VecTy, CmpInst::makeCmpResultType(VecTy), Pred);
  } else {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  }

  // Get cost estimate for the insert element. This cost will factor into
  // both sequences.
  InstructionCost InsertCost =
      TTI.getVectorInstrCost(Instruction::InsertElement, VecTy, Index);
  InstructionCost OldCost =
      (IsConst0 ? 0 : InsertCost) + (IsConst1 ? 0 : InsertCost) + VectorOpCost;
  InstructionCost NewCost = ScalarOpCost + InsertCost +
                            (IsConst0 ? 0 : !Ins0->hasOneUse() * InsertCost) +
                            (IsConst1 ? 0 : !Ins1->hasOneUse() * InsertCost);

  // We want to scalarize unless the vector variant actually has lower cost.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index) -->
  // inselt NewVecC, (scalar_op V0, V1), Index
  if (IsCmp)
    ++NumScalarCmp;
  else
    ++NumScalarBO;

  // For constant cases, extract the scalar element; this should constant fold.
  if (IsConst0)
    V0 = ConstantExpr::getExtractElement(VecC0, Builder.getInt64(Index));
  if (IsConst1)
    V1 = ConstantExpr::getExtractElement(VecC1, Builder.getInt64(Index));

  Value *Scalar =
      IsCmp ? Builder.CreateCmp(Pred, V0, V1)
            : Builder.CreateBinOp((Instruction::BinaryOps)Opcode, V0, V1);

  Scalar->setName(I.getName() + ".scalar");

  // All IR flags are safe to back-propagate. There is no potential for extra
  // poison to be created by the scalar instruction.
  if (auto *ScalarInst = dyn_cast<Instruction>(Scalar))
    ScalarInst->copyIRFlags(&I);

  // Fold the vector constants in the original vectors into a new base vector.
  Constant *NewVecC = IsCmp ? ConstantExpr::getCompare(Pred, VecC0, VecC1)
                            : ConstantExpr::get(Opcode, VecC0, VecC1);
  Value *Insert = Builder.CreateInsertElement(NewVecC, Scalar, Index);
  replaceValue(I, *Insert);
  return true;
}

/// Try to combine a scalar binop + 2 scalar compares of extracted elements of
/// a vector into vector operations followed by extract. Note: The SLP pass
/// may miss this pattern because of implementation problems.
bool VectorCombine::foldExtractedCmps(Instruction &I) {
  // We are looking for a scalar binop of booleans.
  // binop i1 (cmp Pred I0, C0), (cmp Pred I1, C1)
  if (!I.isBinaryOp() || !I.getType()->isIntegerTy(1))
    return false;

  // The compare predicates should match, and each compare should have a
  // constant operand.
  // TODO: Relax the one-use constraints.
  Value *B0 = I.getOperand(0), *B1 = I.getOperand(1);
  Instruction *I0, *I1;
  Constant *C0, *C1;
  CmpInst::Predicate P0, P1;
  if (!match(B0, m_OneUse(m_Cmp(P0, m_Instruction(I0), m_Constant(C0)))) ||
      !match(B1, m_OneUse(m_Cmp(P1, m_Instruction(I1), m_Constant(C1)))) ||
      P0 != P1)
    return false;

  // The compare operands must be extracts of the same vector with constant
  // extract indexes.
  // TODO: Relax the one-use constraints.
  Value *X;
  uint64_t Index0, Index1;
  if (!match(I0, m_OneUse(m_ExtractElt(m_Value(X), m_ConstantInt(Index0)))) ||
      !match(I1, m_OneUse(m_ExtractElt(m_Specific(X), m_ConstantInt(Index1)))))
    return false;

  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  ExtractElementInst *ConvertToShuf = getShuffleExtract(Ext0, Ext1);
  if (!ConvertToShuf)
    return false;

  // The original scalar pattern is:
  // binop i1 (cmp Pred (ext X, Index0), C0), (cmp Pred (ext X, Index1), C1)
  CmpInst::Predicate Pred = P0;
  unsigned CmpOpcode = CmpInst::isFPPredicate(Pred) ? Instruction::FCmp
                                                    : Instruction::ICmp;
  auto *VecTy = dyn_cast<FixedVectorType>(X->getType());
  if (!VecTy)
    return false;

  InstructionCost OldCost =
      TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
  OldCost += TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);
  OldCost +=
      TTI.getCmpSelInstrCost(CmpOpcode, I0->getType(),
                             CmpInst::makeCmpResultType(I0->getType()), Pred) *
      2;
  OldCost += TTI.getArithmeticInstrCost(I.getOpcode(), I.getType());

  // The proposed vector pattern is:
  // vcmp = cmp Pred X, VecC
  // ext (binop vNi1 vcmp, (shuffle vcmp, Index1)), Index0
  int CheapIndex = ConvertToShuf == Ext0 ? Index1 : Index0;
  int ExpensiveIndex = ConvertToShuf == Ext0 ? Index0 : Index1;
  auto *CmpTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(X->getType()));
  InstructionCost NewCost = TTI.getCmpSelInstrCost(
      CmpOpcode, X->getType(), CmpInst::makeCmpResultType(X->getType()), Pred);
  SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem);
  ShufMask[CheapIndex] = ExpensiveIndex;
  NewCost += TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, CmpTy,
                                ShufMask);
  NewCost += TTI.getArithmeticInstrCost(I.getOpcode(), CmpTy);
  NewCost += TTI.getVectorInstrCost(Ext0->getOpcode(), CmpTy, CheapIndex);

  // Aggressively form vector ops if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  if (OldCost < NewCost || !NewCost.isValid())
    return false;

  // Create a vector constant from the 2 scalar constants.
  SmallVector<Constant *, 32> CmpC(VecTy->getNumElements(),
                                   UndefValue::get(VecTy->getElementType()));
  CmpC[Index0] = C0;
  CmpC[Index1] = C1;
  Value *VCmp = Builder.CreateCmp(Pred, X, ConstantVector::get(CmpC));

  Value *Shuf = createShiftShuffle(VCmp, ExpensiveIndex, CheapIndex, Builder);
  Value *VecLogic = Builder.CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                        VCmp, Shuf);
  Value *NewExt = Builder.CreateExtractElement(VecLogic, CheapIndex);
  replaceValue(I, *NewExt);
  ++NumVecCmpBO;
  return true;
}

// Check if a memory location is modified between two instructions in the same
// basic block.
static bool isMemModifiedBetween(BasicBlock::iterator Begin,
                                 BasicBlock::iterator End,
                                 const MemoryLocation &Loc, AAResults &AA) {
  unsigned NumScanned = 0;
  return std::any_of(Begin, End, [&](const Instruction &Instr) {
    return isModSet(AA.getModRefInfo(&Instr, Loc)) ||
           ++NumScanned > MaxInstrsToScan;
  });
}

/// Helper class to indicate whether a vector index can be safely scalarized
/// and if a freeze needs to be inserted.
class ScalarizationResult {
  enum class StatusTy { Unsafe, Safe, SafeWithFreeze };

  StatusTy Status;
  Value *ToFreeze;

  ScalarizationResult(StatusTy Status, Value *ToFreeze = nullptr)
      : Status(Status), ToFreeze(ToFreeze) {}

public:
  ScalarizationResult(const ScalarizationResult &Other) = default;
  ~ScalarizationResult() {
    assert(!ToFreeze && "freeze() not called with ToFreeze being set");
  }

  static ScalarizationResult unsafe() { return {StatusTy::Unsafe}; }
  static ScalarizationResult safe() { return {StatusTy::Safe}; }
  static ScalarizationResult safeWithFreeze(Value *ToFreeze) {
    return {StatusTy::SafeWithFreeze, ToFreeze};
  }

  /// Returns true if the index can be scalarized without requiring a freeze.
  bool isSafe() const { return Status == StatusTy::Safe; }
  /// Returns true if the index cannot be scalarized.
  bool isUnsafe() const { return Status == StatusTy::Unsafe; }
  /// Returns true if the index can be scalarized, but requires inserting a
  /// freeze.
  bool isSafeWithFreeze() const { return Status == StatusTy::SafeWithFreeze; }

  /// Reset the state to Unsafe and clear ToFreeze if set.
  void discard() {
    ToFreeze = nullptr;
    Status = StatusTy::Unsafe;
  }

  /// Freeze the ToFreeze value and update the use in \p UserI to use it.
  void freeze(IRBuilder<> &Builder, Instruction &UserI) {
    assert(isSafeWithFreeze() &&
           "should only be used when freezing is required");
    assert(is_contained(ToFreeze->users(), &UserI) &&
           "UserI must be a user of ToFreeze");
    IRBuilder<>::InsertPointGuard Guard(Builder);
    Builder.SetInsertPoint(cast<Instruction>(&UserI));
    Value *Frozen =
        Builder.CreateFreeze(ToFreeze, ToFreeze->getName() + ".frozen");
    for (Use &U : make_early_inc_range(UserI.operands()))
      if (U.get() == ToFreeze)
        U.set(Frozen);

    ToFreeze = nullptr;
  }
};

/// Check if it is legal to scalarize a memory access to \p VecTy at index \p
/// Idx. \p Idx must access a valid vector element.
static ScalarizationResult canScalarizeAccess(FixedVectorType *VecTy,
                                              Value *Idx, Instruction *CtxI,
                                              AssumptionCache &AC,
                                              const DominatorTree &DT) {
  if (auto *C = dyn_cast<ConstantInt>(Idx)) {
    if (C->getValue().ult(VecTy->getNumElements()))
      return ScalarizationResult::safe();
    return ScalarizationResult::unsafe();
  }

  unsigned IntWidth = Idx->getType()->getScalarSizeInBits();
  APInt Zero(IntWidth, 0);
  APInt MaxElts(IntWidth, VecTy->getNumElements());
  ConstantRange ValidIndices(Zero, MaxElts);
  ConstantRange IdxRange(IntWidth, true);
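  // IdxRange starts as the full range of the index type; it is narrowed below
  // if the index is masked by or taken modulo a constant.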

  if (isGuaranteedNotToBePoison(Idx, &AC)) {
    if (ValidIndices.contains(computeConstantRange(Idx, true, &AC, CtxI, &DT)))
      return ScalarizationResult::safe();
    return ScalarizationResult::unsafe();
  }

  // If the index may be poison, check if we can insert a freeze before the
  // range of the index is restricted.
  Value *IdxBase;
  ConstantInt *CI;
  if (match(Idx, m_And(m_Value(IdxBase), m_ConstantInt(CI)))) {
    IdxRange = IdxRange.binaryAnd(CI->getValue());
  } else if (match(Idx, m_URem(m_Value(IdxBase), m_ConstantInt(CI)))) {
    IdxRange = IdxRange.urem(CI->getValue());
  }

  if (ValidIndices.contains(IdxRange))
    return ScalarizationResult::safeWithFreeze(IdxBase);
  return ScalarizationResult::unsafe();
}

/// The memory operation on a vector of \p ScalarType had alignment of
/// \p VectorAlignment. Compute the maximal, but conservatively correct,
/// alignment that will be valid for the memory operation on a single scalar
/// element of the same type with index \p Idx.
static Align computeAlignmentAfterScalarization(Align VectorAlignment,
                                                Type *ScalarType, Value *Idx,
                                                const DataLayout &DL) {
  if (auto *C = dyn_cast<ConstantInt>(Idx))
    return commonAlignment(VectorAlignment,
                           C->getZExtValue() * DL.getTypeStoreSize(ScalarType));
  return commonAlignment(VectorAlignment, DL.getTypeStoreSize(ScalarType));
}

// Combine patterns like:
//   %0 = load <4 x i32>, <4 x i32>* %a
//   %1 = insertelement <4 x i32> %0, i32 %b, i32 1
//   store <4 x i32> %1, <4 x i32>* %a
// to:
//   %0 = bitcast <4 x i32>* %a to i32*
//   %1 = getelementptr inbounds i32, i32* %0, i64 0, i64 1
//   store i32 %b, i32* %1
bool VectorCombine::foldSingleElementStore(Instruction &I) {
  StoreInst *SI = dyn_cast<StoreInst>(&I);
  if (!SI || !SI->isSimple() ||
      !isa<FixedVectorType>(SI->getValueOperand()->getType()))
    return false;

  // TODO: Combine more complicated patterns (multiple insert) by referencing
  // TargetTransformInfo.
  Instruction *Source;
  Value *NewElement;
  Value *Idx;
  if (!match(SI->getValueOperand(),
             m_InsertElt(m_Instruction(Source), m_Value(NewElement),
                         m_Value(Idx))))
    return false;

  if (auto *Load = dyn_cast<LoadInst>(Source)) {
    auto VecTy = cast<FixedVectorType>(SI->getValueOperand()->getType());
    const DataLayout &DL = I.getModule()->getDataLayout();
    Value *SrcAddr = Load->getPointerOperand()->stripPointerCasts();
    // Don't optimize for atomic/volatile load or store. Ensure memory is not
    // modified in between, the vector type matches the store size, and the
    // index is inbounds.
    if (!Load->isSimple() || Load->getParent() != SI->getParent() ||
        !DL.typeSizeEqualsStoreSize(Load->getType()) ||
        SrcAddr != SI->getPointerOperand()->stripPointerCasts())
      return false;

    auto ScalarizableIdx = canScalarizeAccess(VecTy, Idx, Load, AC, DT);
    if (ScalarizableIdx.isUnsafe() ||
        isMemModifiedBetween(Load->getIterator(), SI->getIterator(),
                             MemoryLocation::get(SI), AA))
      return false;

    if (ScalarizableIdx.isSafeWithFreeze())
      ScalarizableIdx.freeze(Builder, *cast<Instruction>(Idx));
    Value *GEP = Builder.CreateInBoundsGEP(
        SI->getValueOperand()->getType(), SI->getPointerOperand(),
        {ConstantInt::get(Idx->getType(), 0), Idx});
    StoreInst *NSI = Builder.CreateStore(NewElement, GEP);
    NSI->copyMetadata(*SI);
    Align ScalarOpAlignment = computeAlignmentAfterScalarization(
        std::max(SI->getAlign(), Load->getAlign()), NewElement->getType(), Idx,
        DL);
    NSI->setAlignment(ScalarOpAlignment);
    replaceValue(I, *NSI);
    eraseInstruction(I);
    return true;
  }

  return false;
}

/// Try to scalarize vector loads feeding extractelement instructions.
bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
  Value *Ptr;
  if (!match(&I, m_Load(m_Value(Ptr))))
    return false;

  auto *LI = cast<LoadInst>(&I);
  const DataLayout &DL = I.getModule()->getDataLayout();
  if (LI->isVolatile() || !DL.typeSizeEqualsStoreSize(LI->getType()))
    return false;

  auto *FixedVT = dyn_cast<FixedVectorType>(LI->getType());
  if (!FixedVT)
    return false;

  InstructionCost OriginalCost = TTI.getMemoryOpCost(
      Instruction::Load, LI->getType(), Align(LI->getAlignment()),
      LI->getPointerAddressSpace());
  InstructionCost ScalarizedCost = 0;

  Instruction *LastCheckedInst = LI;
  unsigned NumInstChecked = 0;
  // Check if all users of the load are extracts with no memory modifications
  // between the load and the extract. Compute the cost of both the original
  // code and the scalarized version.
  for (User *U : LI->users()) {
    auto *UI = dyn_cast<ExtractElementInst>(U);
    if (!UI || UI->getParent() != LI->getParent())
      return false;

    if (!isGuaranteedNotToBePoison(UI->getOperand(1), &AC, LI, &DT))
      return false;

    // Check if any instruction between the load and the extract may modify
    // memory.
    if (LastCheckedInst->comesBefore(UI)) {
      for (Instruction &I :
           make_range(std::next(LI->getIterator()), UI->getIterator())) {
        // Bail out if we reached the check limit or the instruction may write
        // to memory.
        if (NumInstChecked == MaxInstrsToScan || I.mayWriteToMemory())
          return false;
        NumInstChecked++;
      }
    }

    if (!LastCheckedInst)
      LastCheckedInst = UI;
    else if (LastCheckedInst->comesBefore(UI))
      LastCheckedInst = UI;

    auto ScalarIdx = canScalarizeAccess(FixedVT, UI->getOperand(1), &I, AC, DT);
    if (!ScalarIdx.isSafe()) {
      // TODO: Freeze index if it is safe to do so.
      ScalarIdx.discard();
      return false;
    }

    auto *Index = dyn_cast<ConstantInt>(UI->getOperand(1));
    OriginalCost +=
        TTI.getVectorInstrCost(Instruction::ExtractElement, LI->getType(),
                               Index ? Index->getZExtValue() : -1);
    ScalarizedCost +=
        TTI.getMemoryOpCost(Instruction::Load, FixedVT->getElementType(),
                            Align(1), LI->getPointerAddressSpace());
    ScalarizedCost += TTI.getAddressComputationCost(FixedVT->getElementType());
  }

  if (ScalarizedCost >= OriginalCost)
    return false;

  // Replace extracts with narrow scalar loads.
  for (User *U : LI->users()) {
    auto *EI = cast<ExtractElementInst>(U);
    Builder.SetInsertPoint(EI);

    Value *Idx = EI->getOperand(1);
    Value *GEP =
        Builder.CreateInBoundsGEP(FixedVT, Ptr, {Builder.getInt32(0), Idx});
    auto *NewLoad = cast<LoadInst>(Builder.CreateLoad(
        FixedVT->getElementType(), GEP, EI->getName() + ".scalar"));

    Align ScalarOpAlignment = computeAlignmentAfterScalarization(
        LI->getAlign(), FixedVT->getElementType(), Idx, DL);
    NewLoad->setAlignment(ScalarOpAlignment);

    replaceValue(*EI, *NewLoad);
  }

  return true;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
bool VectorCombine::run() {
  if (DisableVectorCombine)
    return false;

  // Don't attempt vectorization if the target does not support vectors.
  if (!TTI.getNumberOfRegisters(TTI.getRegisterClassForType(/*Vector*/ true)))
    return false;

  bool MadeChange = false;
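  // Fold a single instruction: the vector-forming transforms are tried first
  // (unless we are limited to scalarization), then the scalarizing transforms.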
  auto FoldInst = [this, &MadeChange](Instruction &I) {
    Builder.SetInsertPoint(&I);
    if (!ScalarizationOnly) {
      MadeChange |= vectorizeLoadInsert(I);
      MadeChange |= foldExtractExtract(I);
      MadeChange |= foldBitcastShuf(I);
      MadeChange |= foldExtractedCmps(I);
    }
    MadeChange |= scalarizeBinopOrCmp(I);
    MadeChange |= scalarizeLoadExtract(I);
    MadeChange |= foldSingleElementStore(I);
  };
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Use early increment range so that we can erase instructions in loop.
    for (Instruction &I : make_early_inc_range(BB)) {
      if (I.isDebugOrPseudoInst())
        continue;
      FoldInst(I);
    }
  }

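  // Revisit instructions queued by the folds above: erase the ones that became
  // trivially dead and try to fold the rest again.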
  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.removeOne();
    if (!I)
      continue;

    if (isInstructionTriviallyDead(I)) {
      eraseInstruction(*I);
      continue;
    }

    FoldInst(*I);
  }

  return MadeChange;
}

// Pass manager boilerplate below here.

namespace {
class VectorCombineLegacyPass : public FunctionPass {
public:
  static char ID;
  VectorCombineLegacyPass() : FunctionPass(ID) {
    initializeVectorCombineLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.setPreservesCFG();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    FunctionPass::getAnalysisUsage(AU);
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    VectorCombine Combiner(F, TTI, DT, AA, AC, false);
    return Combiner.run();
  }
};
} // namespace

char VectorCombineLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(VectorCombineLegacyPass, "vector-combine",
                      "Optimize scalar/vector ops", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(VectorCombineLegacyPass, "vector-combine",
                    "Optimize scalar/vector ops", false, false)
Pass *llvm::createVectorCombinePass() {
  return new VectorCombineLegacyPass();
}

PreservedAnalyses VectorCombinePass::run(Function &F,
                                         FunctionAnalysisManager &FAM) {
  auto &AC = FAM.getResult<AssumptionAnalysis>(F);
  TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
  DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  AAResults &AA = FAM.getResult<AAManager>(F);
  VectorCombine Combiner(F, TTI, DT, AA, AC, ScalarizationOnly);
  if (!Combiner.run())
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}