1 //===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines vectorizer utilities.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/VectorUtils.h"
14 #include "llvm/ADT/EquivalenceClasses.h"
15 #include "llvm/Analysis/DemandedBits.h"
16 #include "llvm/Analysis/LoopInfo.h"
17 #include "llvm/Analysis/LoopIterator.h"
18 #include "llvm/Analysis/ScalarEvolution.h"
19 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
20 #include "llvm/Analysis/TargetTransformInfo.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/IR/Constants.h"
23 #include "llvm/IR/GetElementPtrTypeIterator.h"
24 #include "llvm/IR/IRBuilder.h"
25 #include "llvm/IR/PatternMatch.h"
26 #include "llvm/IR/Value.h"
27 #include "llvm/Support/CommandLine.h"
28 
29 #define DEBUG_TYPE "vectorutils"
30 
31 using namespace llvm;
32 using namespace llvm::PatternMatch;
33 
34 /// Maximum factor for an interleaved memory access.
35 static cl::opt<unsigned> MaxInterleaveGroupFactor(
36     "max-interleave-group-factor", cl::Hidden,
37     cl::desc("Maximum factor for an interleaved access group (default = 8)"),
38     cl::init(8));
39 
40 /// Return true if all of the intrinsic's arguments and return type are scalars
41 /// for the scalar form of the intrinsic, and vectors for the vector form of the
42 /// intrinsic (except operands that are marked as always being scalar by
43 /// isVectorIntrinsicWithScalarOpAtArg).
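/// For example, a scalar call to llvm.sqrt.f32 can be widened by the
/// vectorizers to llvm.sqrt.v4f32, because sqrt is listed below as trivially
/// vectorizable (illustrative only; the widening itself is done elsewhere).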
44 bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
45   switch (ID) {
46   case Intrinsic::abs:   // Begin integer bit-manipulation.
47   case Intrinsic::bswap:
48   case Intrinsic::bitreverse:
49   case Intrinsic::ctpop:
50   case Intrinsic::ctlz:
51   case Intrinsic::cttz:
52   case Intrinsic::fshl:
53   case Intrinsic::fshr:
54   case Intrinsic::smax:
55   case Intrinsic::smin:
56   case Intrinsic::umax:
57   case Intrinsic::umin:
58   case Intrinsic::sadd_sat:
59   case Intrinsic::ssub_sat:
60   case Intrinsic::uadd_sat:
61   case Intrinsic::usub_sat:
62   case Intrinsic::smul_fix:
63   case Intrinsic::smul_fix_sat:
64   case Intrinsic::umul_fix:
65   case Intrinsic::umul_fix_sat:
66   case Intrinsic::sqrt: // Begin floating-point.
67   case Intrinsic::sin:
68   case Intrinsic::cos:
69   case Intrinsic::exp:
70   case Intrinsic::exp2:
71   case Intrinsic::log:
72   case Intrinsic::log10:
73   case Intrinsic::log2:
74   case Intrinsic::fabs:
75   case Intrinsic::minnum:
76   case Intrinsic::maxnum:
77   case Intrinsic::minimum:
78   case Intrinsic::maximum:
79   case Intrinsic::copysign:
80   case Intrinsic::floor:
81   case Intrinsic::ceil:
82   case Intrinsic::trunc:
83   case Intrinsic::rint:
84   case Intrinsic::nearbyint:
85   case Intrinsic::round:
86   case Intrinsic::roundeven:
87   case Intrinsic::pow:
88   case Intrinsic::fma:
89   case Intrinsic::fmuladd:
90   case Intrinsic::powi:
91   case Intrinsic::canonicalize:
92   case Intrinsic::fptosi_sat:
93   case Intrinsic::fptoui_sat:
94     return true;
95   default:
96     return false;
97   }
98 }
99 
100 /// Identifies if the vector form of the intrinsic has a scalar operand.
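/// For example, the exponent operand of llvm.powi (operand 1) must stay
/// scalar in the vector form, so this returns true for (Intrinsic::powi, 1)
/// and false for (Intrinsic::powi, 0).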
101 bool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
102                                               unsigned ScalarOpdIdx) {
103   switch (ID) {
104   case Intrinsic::abs:
105   case Intrinsic::ctlz:
106   case Intrinsic::cttz:
107   case Intrinsic::powi:
108     return (ScalarOpdIdx == 1);
109   case Intrinsic::smul_fix:
110   case Intrinsic::smul_fix_sat:
111   case Intrinsic::umul_fix:
112   case Intrinsic::umul_fix_sat:
113     return (ScalarOpdIdx == 2);
114   default:
115     return false;
116   }
117 }
118 
119 bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
120                                                   unsigned OpdIdx) {
121   switch (ID) {
122   case Intrinsic::fptosi_sat:
123   case Intrinsic::fptoui_sat:
124     return OpdIdx == 0;
125   case Intrinsic::powi:
126     return OpdIdx == 1;
127   default:
128     return false;
129   }
130 }
131 
/// Returns the intrinsic ID for a call.
/// For the given call instruction it finds the mapped intrinsic and returns
/// its ID; if no mapping is found, it returns not_intrinsic.
135 Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
136                                                 const TargetLibraryInfo *TLI) {
137   Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
138   if (ID == Intrinsic::not_intrinsic)
139     return Intrinsic::not_intrinsic;
140 
141   if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
142       ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
143       ID == Intrinsic::experimental_noalias_scope_decl ||
144       ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
145     return ID;
146   return Intrinsic::not_intrinsic;
147 }
148 
149 /// Find the operand of the GEP that should be checked for consecutive
150 /// stores. This ignores trailing indices that have no effect on the final
151 /// pointer.
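/// For example, for "getelementptr [1 x i32], ptr %A, i64 %i, i64 0" (names
/// are illustrative) the trailing zero index is peeled because the indexed
/// type has the same allocation size as the GEP's result element type, so the
/// returned operand index is 1 (the operand holding %i).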
152 unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
153   const DataLayout &DL = Gep->getModule()->getDataLayout();
154   unsigned LastOperand = Gep->getNumOperands() - 1;
155   TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
156 
157   // Walk backwards and try to peel off zeros.
158   while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
159     // Find the type we're currently indexing into.
160     gep_type_iterator GEPTI = gep_type_begin(Gep);
161     std::advance(GEPTI, LastOperand - 2);
162 
163     // If it's a type with the same allocation size as the result of the GEP we
164     // can peel off the zero index.
165     if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
166       break;
167     --LastOperand;
168   }
169 
170   return LastOperand;
171 }
172 
173 /// If the argument is a GEP, then returns the operand identified by
174 /// getGEPInductionOperand. However, if there is some other non-loop-invariant
175 /// operand, it returns that instead.
176 Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
177   GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
178   if (!GEP)
179     return Ptr;
180 
181   unsigned InductionOperand = getGEPInductionOperand(GEP);
182 
183   // Check that all of the gep indices are uniform except for our induction
184   // operand.
185   for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
186     if (i != InductionOperand &&
187         !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
188       return Ptr;
189   return GEP->getOperand(InductionOperand);
190 }
191 
192 /// If a value has only one user that is a CastInst, return it.
193 Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
194   Value *UniqueCast = nullptr;
195   for (User *U : Ptr->users()) {
196     CastInst *CI = dyn_cast<CastInst>(U);
197     if (CI && CI->getType() == Ty) {
198       if (!UniqueCast)
199         UniqueCast = CI;
200       else
201         return nullptr;
202     }
203   }
204   return UniqueCast;
205 }
206 
207 /// Get the stride of a pointer access in a loop. Looks for symbolic
208 /// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
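/// For example (a sketch), for an access A[i * %stride] with a loop-invariant
/// %stride, the stripped GEP index has the SCEV {0,+,%stride} and the
/// returned value is %stride.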
209 Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
210   auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
211   if (!PtrTy || PtrTy->isAggregateType())
212     return nullptr;
213 
  // Try to strip a GEP instruction to make the pointer (actually, the index
  // at this point) easier to analyze. If OrigPtr is equal to Ptr we are
  // analyzing the pointer; otherwise, we are analyzing the index.
217   Value *OrigPtr = Ptr;
218 
219   // The size of the pointer access.
220   int64_t PtrAccessSize = 1;
221 
222   Ptr = stripGetElementPtr(Ptr, SE, Lp);
223   const SCEV *V = SE->getSCEV(Ptr);
224 
225   if (Ptr != OrigPtr)
226     // Strip off casts.
227     while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
228       V = C->getOperand();
229 
230   const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
231   if (!S)
232     return nullptr;
233 
234   V = S->getStepRecurrence(*SE);
235   if (!V)
236     return nullptr;
237 
238   // Strip off the size of access multiplication if we are still analyzing the
239   // pointer.
240   if (OrigPtr == Ptr) {
241     if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
242       if (M->getOperand(0)->getSCEVType() != scConstant)
243         return nullptr;
244 
245       const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
246 
247       // Huge step value - give up.
248       if (APStepVal.getBitWidth() > 64)
249         return nullptr;
250 
251       int64_t StepVal = APStepVal.getSExtValue();
252       if (PtrAccessSize != StepVal)
253         return nullptr;
254       V = M->getOperand(1);
255     }
256   }
257 
258   // Strip off casts.
  Type *StrippedOffRecurrenceCast = nullptr;
  if (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V)) {
    StrippedOffRecurrenceCast = C->getType();
262     V = C->getOperand();
263   }
264 
265   // Look for the loop invariant symbolic value.
266   const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
267   if (!U)
268     return nullptr;
269 
270   Value *Stride = U->getValue();
271   if (!Lp->isLoopInvariant(Stride))
272     return nullptr;
273 
274   // If we have stripped off the recurrence cast we have to make sure that we
275   // return the value that is used in this loop so that we can replace it later.
  if (StrippedOffRecurrenceCast)
    Stride = getUniqueCastUse(Stride, Lp, StrippedOffRecurrenceCast);
278 
279   return Stride;
280 }
281 
282 /// Given a vector and an element number, see if the scalar value is
283 /// already around as a register, for example if it were inserted then extracted
284 /// from the vector.
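/// For example, given (names are illustrative)
///   %v = insertelement <4 x i32> %vec, i32 %x, i32 2
/// findScalarElement(%v, 2) returns %x, while an out-of-range index such as 5
/// returns undef for this fixed-length vector.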
285 Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
286   assert(V->getType()->isVectorTy() && "Not looking at a vector?");
287   VectorType *VTy = cast<VectorType>(V->getType());
  // For a fixed-length vector, return undef for an out-of-range access.
289   if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
290     unsigned Width = FVTy->getNumElements();
291     if (EltNo >= Width)
292       return UndefValue::get(FVTy->getElementType());
293   }
294 
295   if (Constant *C = dyn_cast<Constant>(V))
296     return C->getAggregateElement(EltNo);
297 
298   if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
299     // If this is an insert to a variable element, we don't know what it is.
300     if (!isa<ConstantInt>(III->getOperand(2)))
301       return nullptr;
302     unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();
303 
304     // If this is an insert to the element we are looking for, return the
305     // inserted value.
306     if (EltNo == IIElt)
307       return III->getOperand(1);
308 
309     // Guard against infinite loop on malformed, unreachable IR.
310     if (III == III->getOperand(0))
311       return nullptr;
312 
313     // Otherwise, the insertelement doesn't modify the value, recurse on its
314     // vector input.
315     return findScalarElement(III->getOperand(0), EltNo);
316   }
317 
318   ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  // Restrict the following transformation to fixed-length vectors.
320   if (SVI && isa<FixedVectorType>(SVI->getType())) {
321     unsigned LHSWidth =
322         cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
323     int InEl = SVI->getMaskValue(EltNo);
324     if (InEl < 0)
325       return UndefValue::get(VTy->getElementType());
326     if (InEl < (int)LHSWidth)
327       return findScalarElement(SVI->getOperand(0), InEl);
328     return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
329   }
330 
331   // Extract a value from a vector add operation with a constant zero.
332   // TODO: Use getBinOpIdentity() to generalize this.
333   Value *Val; Constant *C;
334   if (match(V, m_Add(m_Value(Val), m_Constant(C))))
335     if (Constant *Elt = C->getAggregateElement(EltNo))
336       if (Elt->isNullValue())
337         return findScalarElement(Val, EltNo);
338 
339   // If the vector is a splat then we can trivially find the scalar element.
340   if (isa<ScalableVectorType>(VTy))
341     if (Value *Splat = getSplatValue(V))
342       if (EltNo < VTy->getElementCount().getKnownMinValue())
343         return Splat;
344 
345   // Otherwise, we don't know.
346   return nullptr;
347 }
348 
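// Illustrative behavior of getSplatIndex (declared in VectorUtils.h): a mask
// such as <1, -1, 1, 1> yields 1, whereas <0, 1, 2, 3> yields -1 because it
// contains more than one distinct non-negative element.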
349 int llvm::getSplatIndex(ArrayRef<int> Mask) {
350   int SplatIndex = -1;
351   for (int M : Mask) {
352     // Ignore invalid (undefined) mask elements.
353     if (M < 0)
354       continue;
355 
356     // There can be only 1 non-negative mask element value if this is a splat.
357     if (SplatIndex != -1 && SplatIndex != M)
358       return -1;
359 
360     // Initialize the splat index to the 1st non-negative mask element.
361     SplatIndex = M;
362   }
363   assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
364   return SplatIndex;
365 }
366 
367 /// Get splat value if the input is a splat vector or return nullptr.
368 /// This function is not fully general. It checks only 2 cases:
369 /// the input value is (1) a splat constant vector or (2) a sequence
370 /// of instructions that broadcasts a scalar at element 0.
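/// For example, the canonical splat sequence (names are illustrative)
///   %ins   = insertelement <4 x float> poison, float %s, i64 0
///   %splat = shufflevector <4 x float> %ins, <4 x float> poison,
///                          <4 x i32> zeroinitializer
/// broadcasts %s, and getSplatValue returns %s.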
371 Value *llvm::getSplatValue(const Value *V) {
372   if (isa<VectorType>(V->getType()))
373     if (auto *C = dyn_cast<Constant>(V))
374       return C->getSplatValue();
375 
376   // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
377   Value *Splat;
378   if (match(V,
379             m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
380                       m_Value(), m_ZeroMask())))
381     return Splat;
382 
383   return nullptr;
384 }
385 
386 bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
387   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
388 
389   if (isa<VectorType>(V->getType())) {
390     if (isa<UndefValue>(V))
391       return true;
392     // FIXME: We can allow undefs, but if Index was specified, we may want to
393     //        check that the constant is defined at that index.
394     if (auto *C = dyn_cast<Constant>(V))
395       return C->getSplatValue() != nullptr;
396   }
397 
398   if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
399     // FIXME: We can safely allow undefs here. If Index was specified, we will
400     //        check that the mask elt is defined at the required index.
401     if (!is_splat(Shuf->getShuffleMask()))
402       return false;
403 
404     // Match any index.
405     if (Index == -1)
406       return true;
407 
408     // Match a specific element. The mask should be defined at and match the
409     // specified index.
410     return Shuf->getMaskValue(Index) == Index;
411   }
412 
413   // The remaining tests are all recursive, so bail out if we hit the limit.
414   if (Depth++ == MaxAnalysisRecursionDepth)
415     return false;
416 
417   // If both operands of a binop are splats, the result is a splat.
418   Value *X, *Y, *Z;
419   if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
420     return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);
421 
422   // If all operands of a select are splats, the result is a splat.
423   if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
424     return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
425            isSplatValue(Z, Index, Depth);
426 
427   // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).
428 
429   return false;
430 }
431 
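// For example, narrowing with Scale == 2 turns the mask <1, -1, 0> into
// <2, 3, -1, -1, 0, 1>: each defined element i expands to the pair
// (2*i, 2*i+1), and undef (negative) elements are replicated.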
432 void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
433                                  SmallVectorImpl<int> &ScaledMask) {
434   assert(Scale > 0 && "Unexpected scaling factor");
435 
436   // Fast-path: if no scaling, then it is just a copy.
437   if (Scale == 1) {
438     ScaledMask.assign(Mask.begin(), Mask.end());
439     return;
440   }
441 
442   ScaledMask.clear();
443   for (int MaskElt : Mask) {
444     if (MaskElt >= 0) {
445       assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
446              "Overflowed 32-bits");
447     }
448     for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
449       ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
450   }
451 }
452 
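// For example, with Scale == 2 the mask <2, 3, 0, 1> widens to <1, 0> and the
// function returns true, whereas <0, 2, 1, 3> cannot be widened (the slices
// are not consecutive) and the function returns false.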
453 bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
454                                 SmallVectorImpl<int> &ScaledMask) {
455   assert(Scale > 0 && "Unexpected scaling factor");
456 
457   // Fast-path: if no scaling, then it is just a copy.
458   if (Scale == 1) {
459     ScaledMask.assign(Mask.begin(), Mask.end());
460     return true;
461   }
462 
  // We must map the original elements down evenly to a type with fewer
  // elements.
464   int NumElts = Mask.size();
465   if (NumElts % Scale != 0)
466     return false;
467 
468   ScaledMask.clear();
469   ScaledMask.reserve(NumElts / Scale);
470 
471   // Step through the input mask by splitting into Scale-sized slices.
472   do {
473     ArrayRef<int> MaskSlice = Mask.take_front(Scale);
474     assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");
475 
476     // The first element of the slice determines how we evaluate this slice.
477     int SliceFront = MaskSlice.front();
478     if (SliceFront < 0) {
479       // Negative values (undef or other "sentinel" values) must be equal across
480       // the entire slice.
481       if (!is_splat(MaskSlice))
482         return false;
483       ScaledMask.push_back(SliceFront);
484     } else {
485       // A positive mask element must be cleanly divisible.
486       if (SliceFront % Scale != 0)
487         return false;
488       // Elements of the slice must be consecutive.
489       for (int i = 1; i < Scale; ++i)
490         if (MaskSlice[i] != SliceFront + i)
491           return false;
492       ScaledMask.push_back(SliceFront / Scale);
493     }
494     Mask = Mask.drop_front(Scale);
495   } while (!Mask.empty());
496 
497   assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");
498 
499   // All elements of the original mask can be scaled down to map to the elements
500   // of a mask with wider elements.
501   return true;
502 }
503 
504 void llvm::processShuffleMasks(
505     ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
506     unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
507     function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction,
508     function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction) {
509   SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
  // Try to better estimate the permutation:
  // 1. Split the source/destination vectors into real registers.
  // 2. Analyze the mask to identify which real registers are permuted.
514   int Sz = Mask.size();
515   unsigned SzDest = Sz / NumOfDestRegs;
516   unsigned SzSrc = Sz / NumOfSrcRegs;
517   for (unsigned I = 0; I < NumOfDestRegs; ++I) {
518     auto &RegMasks = Res[I];
519     RegMasks.assign(NumOfSrcRegs, {});
    // Determine which src register each value in this dest register comes
    // from.
522     for (unsigned K = 0; K < SzDest; ++K) {
523       int Idx = I * SzDest + K;
524       if (Idx == Sz)
525         break;
526       if (Mask[Idx] >= Sz || Mask[Idx] == UndefMaskElem)
527         continue;
528       int SrcRegIdx = Mask[Idx] / SzSrc;
      // Add a cost of PermuteTwoSrc for each new source register permute,
      // if we have more than one source register.
531       if (RegMasks[SrcRegIdx].empty())
532         RegMasks[SrcRegIdx].assign(SzDest, UndefMaskElem);
533       RegMasks[SrcRegIdx][K] = Mask[Idx] % SzSrc;
534     }
535   }
536   // Process split mask.
537   for (unsigned I = 0; I < NumOfUsedRegs; ++I) {
538     auto &Dest = Res[I];
539     int NumSrcRegs =
540         count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
541     switch (NumSrcRegs) {
542     case 0:
543       // No input vectors were used!
544       NoInputAction();
545       break;
546     case 1: {
      // Find the single non-empty source mask.
548       auto *It =
549           find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
550       unsigned SrcReg = std::distance(Dest.begin(), It);
551       SingleInputAction(*It, SrcReg, I);
552       break;
553     }
554     default: {
      // The first mask is a permutation of a single register. Since we have >2
      // input registers to shuffle, we merge the masks for the first 2
      // registers and generate a shuffle of 2 registers rather than reordering
      // the first register and then shuffling with the second register. Next,
559       // generate the shuffles of the resulting register + the remaining
560       // registers from the list.
561       auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
562                                ArrayRef<int> SecondMask) {
563         for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
564           if (SecondMask[Idx] != UndefMaskElem) {
565             assert(FirstMask[Idx] == UndefMaskElem &&
566                    "Expected undefined mask element.");
567             FirstMask[Idx] = SecondMask[Idx] + VF;
568           }
569         }
570       };
571       auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
572         for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
573           if (Mask[Idx] != UndefMaskElem)
574             Mask[Idx] = Idx;
575         }
576       };
577       int SecondIdx;
578       do {
579         int FirstIdx = -1;
580         SecondIdx = -1;
581         MutableArrayRef<int> FirstMask, SecondMask;
582         for (unsigned I = 0; I < NumOfDestRegs; ++I) {
583           SmallVectorImpl<int> &RegMask = Dest[I];
584           if (RegMask.empty())
585             continue;
586 
587           if (FirstIdx == SecondIdx) {
588             FirstIdx = I;
589             FirstMask = RegMask;
590             continue;
591           }
592           SecondIdx = I;
593           SecondMask = RegMask;
594           CombineMasks(FirstMask, SecondMask);
595           ManyInputsAction(FirstMask, FirstIdx, SecondIdx);
596           NormalizeMask(FirstMask);
597           RegMask.clear();
598           SecondMask = FirstMask;
599           SecondIdx = FirstIdx;
600         }
601         if (FirstIdx != SecondIdx && SecondIdx >= 0) {
602           CombineMasks(SecondMask, FirstMask);
603           ManyInputsAction(SecondMask, SecondIdx, FirstIdx);
604           Dest[FirstIdx].clear();
605           NormalizeMask(SecondMask);
606         }
607       } while (SecondIdx >= 0);
608       break;
609     }
610     }
611   }
612 }
613 
614 MapVector<Instruction *, uint64_t>
615 llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
616                                const TargetTransformInfo *TTI) {
617 
618   // DemandedBits will give us every value's live-out bits. But we want
619   // to ensure no extra casts would need to be inserted, so every DAG
620   // of connected values must have the same minimum bitwidth.
621   EquivalenceClasses<Value *> ECs;
622   SmallVector<Value *, 16> Worklist;
623   SmallPtrSet<Value *, 4> Roots;
624   SmallPtrSet<Value *, 16> Visited;
625   DenseMap<Value *, uint64_t> DBits;
626   SmallPtrSet<Instruction *, 4> InstructionSet;
627   MapVector<Instruction *, uint64_t> MinBWs;
628 
629   // Determine the roots. We work bottom-up, from truncs or icmps.
630   bool SeenExtFromIllegalType = false;
631   for (auto *BB : Blocks)
632     for (auto &I : *BB) {
633       InstructionSet.insert(&I);
634 
635       if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
636           !TTI->isTypeLegal(I.getOperand(0)->getType()))
637         SeenExtFromIllegalType = true;
638 
639       // Only deal with non-vector integers up to 64-bits wide.
640       if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
641           !I.getType()->isVectorTy() &&
642           I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
643         // Don't make work for ourselves. If we know the loaded type is legal,
644         // don't add it to the worklist.
645         if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
646           continue;
647 
648         Worklist.push_back(&I);
649         Roots.insert(&I);
650       }
651     }
652   // Early exit.
653   if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
654     return MinBWs;
655 
656   // Now proceed breadth-first, unioning values together.
657   while (!Worklist.empty()) {
658     Value *Val = Worklist.pop_back_val();
659     Value *Leader = ECs.getOrInsertLeaderValue(Val);
660 
661     if (Visited.count(Val))
662       continue;
663     Visited.insert(Val);
664 
665     // Non-instructions terminate a chain successfully.
666     if (!isa<Instruction>(Val))
667       continue;
668     Instruction *I = cast<Instruction>(Val);
669 
670     // If we encounter a type that is larger than 64 bits, we can't represent
671     // it so bail out.
672     if (DB.getDemandedBits(I).getBitWidth() > 64)
673       return MapVector<Instruction *, uint64_t>();
674 
675     uint64_t V = DB.getDemandedBits(I).getZExtValue();
676     DBits[Leader] |= V;
677     DBits[I] = V;
678 
679     // Casts, loads and instructions outside of our range terminate a chain
680     // successfully.
681     if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
682         !InstructionSet.count(I))
683       continue;
684 
685     // Unsafe casts terminate a chain unsuccessfully. We can't do anything
686     // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
687     // transform anything that relies on them.
688     if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
689         !I->getType()->isIntegerTy()) {
690       DBits[Leader] |= ~0ULL;
691       continue;
692     }
693 
694     // We don't modify the types of PHIs. Reductions will already have been
695     // truncated if possible, and inductions' sizes will have been chosen by
696     // indvars.
697     if (isa<PHINode>(I))
698       continue;
699 
700     if (DBits[Leader] == ~0ULL)
701       // All bits demanded, no point continuing.
702       continue;
703 
704     for (Value *O : cast<User>(I)->operands()) {
705       ECs.unionSets(Leader, O);
706       Worklist.push_back(O);
707     }
708   }
709 
710   // Now we've discovered all values, walk them to see if there are
711   // any users we didn't see. If there are, we can't optimize that
712   // chain.
713   for (auto &I : DBits)
714     for (auto *U : I.first->users())
715       if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
716         DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;
717 
718   for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
719     uint64_t LeaderDemandedBits = 0;
720     for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
721       LeaderDemandedBits |= DBits[M];
722 
723     uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) -
724                      llvm::countLeadingZeros(LeaderDemandedBits);
725     // Round up to a power of 2
726     if (!isPowerOf2_64((uint64_t)MinBW))
727       MinBW = NextPowerOf2(MinBW);
728 
729     // We don't modify the types of PHIs. Reductions will already have been
730     // truncated if possible, and inductions' sizes will have been chosen by
731     // indvars.
732     // If we are required to shrink a PHI, abandon this entire equivalence class.
733     bool Abort = false;
734     for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
735       if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
736         Abort = true;
737         break;
738       }
739     if (Abort)
740       continue;
741 
742     for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) {
743       if (!isa<Instruction>(M))
744         continue;
745       Type *Ty = M->getType();
746       if (Roots.count(M))
747         Ty = cast<Instruction>(M)->getOperand(0)->getType();
748       if (MinBW < Ty->getScalarSizeInBits())
749         MinBWs[cast<Instruction>(M)] = MinBW;
750     }
751   }
752 
753   return MinBWs;
754 }
755 
756 /// Add all access groups in @p AccGroups to @p List.
757 template <typename ListT>
758 static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
759   // Interpret an access group as a list containing itself.
760   if (AccGroups->getNumOperands() == 0) {
761     assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
762     List.insert(AccGroups);
763     return;
764   }
765 
766   for (auto &AccGroupListOp : AccGroups->operands()) {
767     auto *Item = cast<MDNode>(AccGroupListOp.get());
768     assert(isValidAsAccessGroup(Item) && "List item must be an access group");
769     List.insert(Item);
770   }
771 }
772 
773 MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
774   if (!AccGroups1)
775     return AccGroups2;
776   if (!AccGroups2)
777     return AccGroups1;
778   if (AccGroups1 == AccGroups2)
779     return AccGroups1;
780 
781   SmallSetVector<Metadata *, 4> Union;
782   addToAccessGroupList(Union, AccGroups1);
783   addToAccessGroupList(Union, AccGroups2);
784 
785   if (Union.size() == 0)
786     return nullptr;
787   if (Union.size() == 1)
788     return cast<MDNode>(Union.front());
789 
790   LLVMContext &Ctx = AccGroups1->getContext();
791   return MDNode::get(Ctx, Union.getArrayRef());
792 }
793 
794 MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
795                                     const Instruction *Inst2) {
796   bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
797   bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();
798 
799   if (!MayAccessMem1 && !MayAccessMem2)
800     return nullptr;
801   if (!MayAccessMem1)
802     return Inst2->getMetadata(LLVMContext::MD_access_group);
803   if (!MayAccessMem2)
804     return Inst1->getMetadata(LLVMContext::MD_access_group);
805 
806   MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
807   MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
808   if (!MD1 || !MD2)
809     return nullptr;
810   if (MD1 == MD2)
811     return MD1;
812 
813   // Use set for scalable 'contains' check.
814   SmallPtrSet<Metadata *, 4> AccGroupSet2;
815   addToAccessGroupList(AccGroupSet2, MD2);
816 
817   SmallVector<Metadata *, 4> Intersection;
818   if (MD1->getNumOperands() == 0) {
819     assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
820     if (AccGroupSet2.count(MD1))
821       Intersection.push_back(MD1);
822   } else {
823     for (const MDOperand &Node : MD1->operands()) {
824       auto *Item = cast<MDNode>(Node.get());
825       assert(isValidAsAccessGroup(Item) && "List item must be an access group");
826       if (AccGroupSet2.count(Item))
827         Intersection.push_back(Item);
828     }
829   }
830 
831   if (Intersection.size() == 0)
832     return nullptr;
833   if (Intersection.size() == 1)
834     return cast<MDNode>(Intersection.front());
835 
836   LLVMContext &Ctx = Inst1->getContext();
837   return MDNode::get(Ctx, Intersection);
838 }
839 
840 /// \returns \p I after propagating metadata from \p VL.
841 Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
842   if (VL.empty())
843     return Inst;
844   Instruction *I0 = cast<Instruction>(VL[0]);
845   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
846   I0->getAllMetadataOtherThanDebugLoc(Metadata);
847 
848   for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
849                     LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
850                     LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
851                     LLVMContext::MD_access_group}) {
852     MDNode *MD = I0->getMetadata(Kind);
853 
854     for (int J = 1, E = VL.size(); MD && J != E; ++J) {
855       const Instruction *IJ = cast<Instruction>(VL[J]);
856       MDNode *IMD = IJ->getMetadata(Kind);
857       switch (Kind) {
858       case LLVMContext::MD_tbaa:
859         MD = MDNode::getMostGenericTBAA(MD, IMD);
860         break;
861       case LLVMContext::MD_alias_scope:
862         MD = MDNode::getMostGenericAliasScope(MD, IMD);
863         break;
864       case LLVMContext::MD_fpmath:
865         MD = MDNode::getMostGenericFPMath(MD, IMD);
866         break;
867       case LLVMContext::MD_noalias:
868       case LLVMContext::MD_nontemporal:
869       case LLVMContext::MD_invariant_load:
870         MD = MDNode::intersect(MD, IMD);
871         break;
872       case LLVMContext::MD_access_group:
873         MD = intersectAccessGroups(Inst, IJ);
874         break;
875       default:
876         llvm_unreachable("unhandled metadata");
877       }
878     }
879 
880     Inst->setMetadata(Kind, MD);
881   }
882 
883   return Inst;
884 }
885 
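// For example (a sketch), for an interleave group with factor 3 that has
// members at indices 0 and 2 but a gap at index 1, and VF == 2, the returned
// mask is <i1 1, i1 0, i1 1, i1 1, i1 0, i1 1>.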
886 Constant *
887 llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
888                            const InterleaveGroup<Instruction> &Group) {
889   // All 1's means mask is not needed.
890   if (Group.getNumMembers() == Group.getFactor())
891     return nullptr;
892 
893   // TODO: support reversed access.
894   assert(!Group.isReverse() && "Reversed group not supported.");
895 
896   SmallVector<Constant *, 16> Mask;
897   for (unsigned i = 0; i < VF; i++)
898     for (unsigned j = 0; j < Group.getFactor(); ++j) {
899       unsigned HasMember = Group.getMember(j) ? 1 : 0;
900       Mask.push_back(Builder.getInt1(HasMember));
901     }
902 
903   return ConstantVector::get(Mask);
904 }
905 
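// For example, createReplicatedMask(/*ReplicationFactor=*/3, /*VF=*/2)
// produces the mask <0, 0, 0, 1, 1, 1>.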
906 llvm::SmallVector<int, 16>
907 llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) {
908   SmallVector<int, 16> MaskVec;
909   for (unsigned i = 0; i < VF; i++)
910     for (unsigned j = 0; j < ReplicationFactor; j++)
911       MaskVec.push_back(i);
912 
913   return MaskVec;
914 }
915 
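// For example, createInterleaveMask(/*VF=*/4, /*NumVecs=*/2) produces the
// mask <0, 4, 1, 5, 2, 6, 3, 7>, interleaving two 4-element vectors.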
916 llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
917                                                       unsigned NumVecs) {
918   SmallVector<int, 16> Mask;
919   for (unsigned i = 0; i < VF; i++)
920     for (unsigned j = 0; j < NumVecs; j++)
921       Mask.push_back(j * VF + i);
922 
923   return Mask;
924 }
925 
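// For example, createStrideMask(/*Start=*/0, /*Stride=*/2, /*VF=*/4) produces
// the mask <0, 2, 4, 6>.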
926 llvm::SmallVector<int, 16>
927 llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) {
928   SmallVector<int, 16> Mask;
929   for (unsigned i = 0; i < VF; i++)
930     Mask.push_back(Start + i * Stride);
931 
932   return Mask;
933 }
934 
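// For example, createSequentialMask(/*Start=*/0, /*NumInts=*/4,
// /*NumUndefs=*/2) produces the mask <0, 1, 2, 3, -1, -1>, where -1 denotes
// an undef mask element.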
935 llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
936                                                       unsigned NumInts,
937                                                       unsigned NumUndefs) {
938   SmallVector<int, 16> Mask;
939   for (unsigned i = 0; i < NumInts; i++)
940     Mask.push_back(Start + i);
941 
942   for (unsigned i = 0; i < NumUndefs; i++)
943     Mask.push_back(-1);
944 
945   return Mask;
946 }
947 
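// For example, with NumElts == 4 the two-operand mask <0, 5, 2, 7> becomes
// the unary mask <0, 1, 2, 3>.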
948 llvm::SmallVector<int, 16> llvm::createUnaryMask(ArrayRef<int> Mask,
949                                                  unsigned NumElts) {
950   // Avoid casts in the loop and make sure we have a reasonable number.
951   int NumEltsSigned = NumElts;
952   assert(NumEltsSigned > 0 && "Expected smaller or non-zero element count");
953 
954   // If the mask chooses an element from operand 1, reduce it to choose from the
955   // corresponding element of operand 0. Undef mask elements are unchanged.
956   SmallVector<int, 16> UnaryMask;
957   for (int MaskElt : Mask) {
958     assert((MaskElt < NumEltsSigned * 2) && "Expected valid shuffle mask");
959     int UnaryElt = MaskElt >= NumEltsSigned ? MaskElt - NumEltsSigned : MaskElt;
960     UnaryMask.push_back(UnaryElt);
961   }
962   return UnaryMask;
963 }
964 
965 /// A helper function for concatenating vectors. This function concatenates two
966 /// vectors having the same element type. If the second vector has fewer
967 /// elements than the first, it is padded with undefs.
968 static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
969                                     Value *V2) {
970   VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
971   VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
972   assert(VecTy1 && VecTy2 &&
973          VecTy1->getScalarType() == VecTy2->getScalarType() &&
974          "Expect two vectors with the same element type");
975 
976   unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
977   unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Expected the first vector to have at least as many elements");
979 
980   if (NumElts1 > NumElts2) {
981     // Extend with UNDEFs.
982     V2 = Builder.CreateShuffleVector(
983         V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
984   }
985 
986   return Builder.CreateShuffleVector(
987       V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0));
988 }
989 
990 Value *llvm::concatenateVectors(IRBuilderBase &Builder,
991                                 ArrayRef<Value *> Vecs) {
992   unsigned NumVecs = Vecs.size();
993   assert(NumVecs > 1 && "Should be at least two vectors");
994 
995   SmallVector<Value *, 8> ResList;
996   ResList.append(Vecs.begin(), Vecs.end());
997   do {
998     SmallVector<Value *, 8> TmpList;
999     for (unsigned i = 0; i < NumVecs - 1; i += 2) {
1000       Value *V0 = ResList[i], *V1 = ResList[i + 1];
1001       assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
1002              "Only the last vector may have a different type");
1003 
1004       TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
1005     }
1006 
1007     // Push the last vector if the total number of vectors is odd.
1008     if (NumVecs % 2 != 0)
1009       TmpList.push_back(ResList[NumVecs - 1]);
1010 
1011     ResList = TmpList;
1012     NumVecs = ResList.size();
1013   } while (NumVecs > 1);
1014 
1015   return ResList[0];
1016 }
1017 
1018 bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
1019   assert(isa<VectorType>(Mask->getType()) &&
1020          isa<IntegerType>(Mask->getType()->getScalarType()) &&
1021          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
1022              1 &&
1023          "Mask must be a vector of i1");
1024 
1025   auto *ConstMask = dyn_cast<Constant>(Mask);
1026   if (!ConstMask)
1027     return false;
1028   if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
1029     return true;
1030   if (isa<ScalableVectorType>(ConstMask->getType()))
1031     return false;
1032   for (unsigned
1033            I = 0,
1034            E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
1035        I != E; ++I) {
1036     if (auto *MaskElt = ConstMask->getAggregateElement(I))
1037       if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
1038         continue;
1039     return false;
1040   }
1041   return true;
1042 }
1043 
1044 bool llvm::maskIsAllOneOrUndef(Value *Mask) {
1045   assert(isa<VectorType>(Mask->getType()) &&
1046          isa<IntegerType>(Mask->getType()->getScalarType()) &&
1047          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
1048              1 &&
1049          "Mask must be a vector of i1");
1050 
1051   auto *ConstMask = dyn_cast<Constant>(Mask);
1052   if (!ConstMask)
1053     return false;
1054   if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
1055     return true;
1056   if (isa<ScalableVectorType>(ConstMask->getType()))
1057     return false;
1058   for (unsigned
1059            I = 0,
1060            E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
1061        I != E; ++I) {
1062     if (auto *MaskElt = ConstMask->getAggregateElement(I))
1063       if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
1064         continue;
1065     return false;
1066   }
1067   return true;
1068 }
1069 
1070 /// TODO: This is a lot like known bits, but for
1071 /// vectors.  Is there something we can common this with?
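/// For example (a sketch), for the constant mask <i1 1, i1 0, i1 1, i1 1>
/// only lanes 0, 2 and 3 may be demanded, so bits 0, 2 and 3 of the result
/// are set and bit 1 is clear; a non-constant mask conservatively demands all
/// lanes.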
1072 APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
1073   assert(isa<FixedVectorType>(Mask->getType()) &&
1074          isa<IntegerType>(Mask->getType()->getScalarType()) &&
1075          cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
1076              1 &&
1077          "Mask must be a fixed width vector of i1");
1078 
1079   const unsigned VWidth =
1080       cast<FixedVectorType>(Mask->getType())->getNumElements();
1081   APInt DemandedElts = APInt::getAllOnes(VWidth);
1082   if (auto *CV = dyn_cast<ConstantVector>(Mask))
1083     for (unsigned i = 0; i < VWidth; i++)
1084       if (CV->getAggregateElement(i)->isNullValue())
1085         DemandedElts.clearBit(i);
1086   return DemandedElts;
1087 }
1088 
1089 bool InterleavedAccessInfo::isStrided(int Stride) {
1090   unsigned Factor = std::abs(Stride);
1091   return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
1092 }
1093 
1094 void InterleavedAccessInfo::collectConstStrideAccesses(
1095     MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
1096     const ValueToValueMap &Strides) {
1097   auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();
1098 
1099   // Since it's desired that the load/store instructions be maintained in
1100   // "program order" for the interleaved access analysis, we have to visit the
1101   // blocks in the loop in reverse postorder (i.e., in a topological order).
1102   // Such an ordering will ensure that any load/store that may be executed
1103   // before a second load/store will precede the second load/store in
1104   // AccessStrideInfo.
1105   LoopBlocksDFS DFS(TheLoop);
1106   DFS.perform(LI);
1107   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
1108     for (auto &I : *BB) {
1109       Value *Ptr = getLoadStorePointerOperand(&I);
1110       if (!Ptr)
1111         continue;
1112       Type *ElementTy = getLoadStoreType(&I);
1113 
1114       // We don't check wrapping here because we don't know yet if Ptr will be
1115       // part of a full group or a group with gaps. Checking wrapping for all
1116       // pointers (even those that end up in groups with no gaps) will be overly
1117       // conservative. For full groups, wrapping should be ok since if we would
1118       // wrap around the address space we would do a memory access at nullptr
1119       // even without the transformation. The wrapping checks are therefore
1120       // deferred until after we've formed the interleaved groups.
1121       int64_t Stride = getPtrStride(PSE, ElementTy, Ptr, TheLoop, Strides,
1122                                     /*Assume=*/true, /*ShouldCheckWrap=*/false);
1123 
1124       const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
1125       uint64_t Size = DL.getTypeAllocSize(ElementTy);
1126       AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
1127                                               getLoadStoreAlignment(&I));
1128     }
1129 }
1130 
1131 // Analyze interleaved accesses and collect them into interleaved load and
1132 // store groups.
1133 //
1134 // When generating code for an interleaved load group, we effectively hoist all
1135 // loads in the group to the location of the first load in program order. When
1136 // generating code for an interleaved store group, we sink all stores to the
1137 // location of the last store. This code motion can change the order of load
1138 // and store instructions and may break dependences.
1139 //
1140 // The code generation strategy mentioned above ensures that we won't violate
1141 // any write-after-read (WAR) dependences.
1142 //
1143 // E.g., for the WAR dependence:  a = A[i];      // (1)
1144 //                                A[i] = b;      // (2)
1145 //
1146 // The store group of (2) is always inserted at or below (2), and the load
1147 // group of (1) is always inserted at or above (1). Thus, the instructions will
1148 // never be reordered. All other dependences are checked to ensure the
1149 // correctness of the instruction reordering.
1150 //
1151 // The algorithm visits all memory accesses in the loop in bottom-up program
1152 // order. Program order is established by traversing the blocks in the loop in
1153 // reverse postorder when collecting the accesses.
1154 //
1155 // We visit the memory accesses in bottom-up order because it can simplify the
1156 // construction of store groups in the presence of write-after-write (WAW)
1157 // dependences.
1158 //
1159 // E.g., for the WAW dependence:  A[i] = a;      // (1)
1160 //                                A[i] = b;      // (2)
1161 //                                A[i + 1] = c;  // (3)
1162 //
1163 // We will first create a store group with (3) and (2). (1) can't be added to
1164 // this group because it and (2) are dependent. However, (1) can be grouped
1165 // with other accesses that may precede it in program order. Note that a
1166 // bottom-up order does not imply that WAW dependences should not be checked.
1167 void InterleavedAccessInfo::analyzeInterleaving(
1168                                  bool EnablePredicatedInterleavedMemAccesses) {
1169   LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
1170   const ValueToValueMap &Strides = LAI->getSymbolicStrides();
1171 
1172   // Holds all accesses with a constant stride.
1173   MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
1174   collectConstStrideAccesses(AccessStrideInfo, Strides);
1175 
1176   if (AccessStrideInfo.empty())
1177     return;
1178 
1179   // Collect the dependences in the loop.
1180   collectDependences();
1181 
1182   // Holds all interleaved store groups temporarily.
1183   SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
1184   // Holds all interleaved load groups temporarily.
1185   SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;
1186 
1187   // Search in bottom-up program order for pairs of accesses (A and B) that can
1188   // form interleaved load or store groups. In the algorithm below, access A
1189   // precedes access B in program order. We initialize a group for B in the
1190   // outer loop of the algorithm, and then in the inner loop, we attempt to
1191   // insert each A into B's group if:
1192   //
1193   //  1. A and B have the same stride,
1194   //  2. A and B have the same memory object size, and
1195   //  3. A belongs in B's group according to its distance from B.
1196   //
1197   // Special care is taken to ensure group formation will not break any
1198   // dependences.
1199   for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
1200        BI != E; ++BI) {
1201     Instruction *B = BI->first;
1202     StrideDescriptor DesB = BI->second;
1203 
1204     // Initialize a group for B if it has an allowable stride. Even if we don't
1205     // create a group for B, we continue with the bottom-up algorithm to ensure
1206     // we don't break any of B's dependences.
1207     InterleaveGroup<Instruction> *Group = nullptr;
1208     if (isStrided(DesB.Stride) &&
1209         (!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) {
1210       Group = getInterleaveGroup(B);
1211       if (!Group) {
1212         LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
1213                           << '\n');
1214         Group = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
1215       }
1216       if (B->mayWriteToMemory())
1217         StoreGroups.insert(Group);
1218       else
1219         LoadGroups.insert(Group);
1220     }
1221 
1222     for (auto AI = std::next(BI); AI != E; ++AI) {
1223       Instruction *A = AI->first;
1224       StrideDescriptor DesA = AI->second;
1225 
1226       // Our code motion strategy implies that we can't have dependences
1227       // between accesses in an interleaved group and other accesses located
1228       // between the first and last member of the group. Note that this also
1229       // means that a group can't have more than one member at a given offset.
1230       // The accesses in a group can have dependences with other accesses, but
1231       // we must ensure we don't extend the boundaries of the group such that
1232       // we encompass those dependent accesses.
1233       //
1234       // For example, assume we have the sequence of accesses shown below in a
1235       // stride-2 loop:
1236       //
1237       //  (1, 2) is a group | A[i]   = a;  // (1)
1238       //                    | A[i-1] = b;  // (2) |
1239       //                      A[i-3] = c;  // (3)
1240       //                      A[i]   = d;  // (4) | (2, 4) is not a group
1241       //
1242       // Because accesses (2) and (3) are dependent, we can group (2) with (1)
1243       // but not with (4). If we did, the dependent access (3) would be within
1244       // the boundaries of the (2, 4) group.
1245       if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
1246         // If a dependence exists and A is already in a group, we know that A
1247         // must be a store since A precedes B and WAR dependences are allowed.
1248         // Thus, A would be sunk below B. We release A's group to prevent this
1249         // illegal code motion. A will then be free to form another group with
1250         // instructions that precede it.
1251         if (isInterleaved(A)) {
1252           InterleaveGroup<Instruction> *StoreGroup = getInterleaveGroup(A);
1253 
          LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
                               "dependence between "
                            << *A << " and " << *B << '\n');
1256 
1257           StoreGroups.remove(StoreGroup);
1258           releaseGroup(StoreGroup);
1259         }
1260 
1261         // If a dependence exists and A is not already in a group (or it was
1262         // and we just released it), B might be hoisted above A (if B is a
1263         // load) or another store might be sunk below A (if B is a store). In
1264         // either case, we can't add additional instructions to B's group. B
1265         // will only form a group with instructions that it precedes.
1266         break;
1267       }
1268 
1269       // At this point, we've checked for illegal code motion. If either A or B
1270       // isn't strided, there's nothing left to do.
1271       if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
1272         continue;
1273 
1274       // Ignore A if it's already in a group or isn't the same kind of memory
1275       // operation as B.
1276       // Note that mayReadFromMemory() isn't mutually exclusive to
1277       // mayWriteToMemory in the case of atomic loads. We shouldn't see those
1278       // here, canVectorizeMemory() should have returned false - except for the
1279       // case we asked for optimization remarks.
1280       if (isInterleaved(A) ||
1281           (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
1282           (A->mayWriteToMemory() != B->mayWriteToMemory()))
1283         continue;
1284 
1285       // Check rules 1 and 2. Ignore A if its stride or size is different from
1286       // that of B.
1287       if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
1288         continue;
1289 
1290       // Ignore A if the memory object of A and B don't belong to the same
1291       // address space
1292       if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
1293         continue;
1294 
1295       // Calculate the distance from A to B.
1296       const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
1297           PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
1298       if (!DistToB)
1299         continue;
1300       int64_t DistanceToB = DistToB->getAPInt().getSExtValue();
1301 
1302       // Check rule 3. Ignore A if its distance to B is not a multiple of the
1303       // size.
1304       if (DistanceToB % static_cast<int64_t>(DesB.Size))
1305         continue;
1306 
1307       // All members of a predicated interleave-group must have the same predicate,
1308       // and currently must reside in the same BB.
1309       BasicBlock *BlockA = A->getParent();
1310       BasicBlock *BlockB = B->getParent();
1311       if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
1312           (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
1313         continue;
1314 
1315       // The index of A is the index of B plus A's distance to B in multiples
1316       // of the size.
1317       int IndexA =
1318           Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);
1319 
1320       // Try to insert A into B's group.
1321       if (Group->insertMember(A, IndexA, DesA.Alignment)) {
1322         LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
1323                           << "    into the interleave group with" << *B
1324                           << '\n');
1325         InterleaveGroupMap[A] = Group;
1326 
1327         // Set the first load in program order as the insert position.
1328         if (A->mayReadFromMemory())
1329           Group->setInsertPos(A);
1330       }
1331     } // Iteration over A accesses.
1332   }   // Iteration over B accesses.
1333 
1334   auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group,
1335                                             int Index,
1336                                             std::string FirstOrLast) -> bool {
1337     Instruction *Member = Group->getMember(Index);
1338     assert(Member && "Group member does not exist");
1339     Value *MemberPtr = getLoadStorePointerOperand(Member);
1340     Type *AccessTy = getLoadStoreType(Member);
1341     if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
1342                      /*Assume=*/false, /*ShouldCheckWrap=*/true))
1343       return false;
1344     LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
1345                       << FirstOrLast
1346                       << " group member potentially pointer-wrapping.\n");
1347     releaseGroup(Group);
1348     return true;
1349   };
1350 
1351   // Remove interleaved groups with gaps whose memory
1352   // accesses may wrap around. We have to revisit the getPtrStride analysis,
1353   // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
1354   // not check wrapping (see documentation there).
1355   // FORNOW we use Assume=false;
1356   // TODO: Change to Assume=true but making sure we don't exceed the threshold
1357   // of runtime SCEV assumptions checks (thereby potentially failing to
1358   // vectorize altogether).
1359   // Additional optional optimizations:
1360   // TODO: If we are peeling the loop and we know that the first pointer doesn't
1361   // wrap then we can deduce that all pointers in the group don't wrap.
1362   // This means that we can forcefully peel the loop in order to only have to
1363   // check the first pointer for no-wrap. When we'll change to use Assume=true
1364   // we'll only need at most one runtime check per interleaved group.
1365   for (auto *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
1369     if (Group->getNumMembers() == Group->getFactor())
1370       continue;
1371 
1372     // Case 2: If first and last members of the group don't wrap this implies
1373     // that all the pointers in the group don't wrap.
1374     // So we check only group member 0 (which is always guaranteed to exist),
1375     // and group member Factor - 1; If the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
1377     if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
1378       continue;
1379     if (Group->getMember(Group->getFactor() - 1))
1380       InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1,
1381                                      std::string("last"));
1382     else {
1383       // Case 3: A non-reversed interleaved load group with gaps: We need
1384       // to execute at least one scalar epilogue iteration. This will ensure
1385       // we don't speculatively access memory out-of-bounds. We only need
1386       // to look for a member at index factor - 1, since every group must have
1387       // a member at index zero.
1388       if (Group->isReverse()) {
1389         LLVM_DEBUG(
1390             dbgs() << "LV: Invalidate candidate interleaved group due to "
1391                       "a reverse access with gaps.\n");
1392         releaseGroup(Group);
1393         continue;
1394       }
1395       LLVM_DEBUG(
1396           dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
1397       RequiresScalarEpilogue = true;
1398     }
1399   }
1400 
1401   for (auto *Group : StoreGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide store would wrap around the address space, we would access memory
    // at nullptr even without the transformation.
1405     if (Group->getNumMembers() == Group->getFactor())
1406       continue;
1407 
    // An interleaved store group with gaps is implemented using a masked wide
    // store, so remove interleaved store groups with gaps if
    // masked-interleaved-accesses are not enabled by the target.
1411     if (!EnablePredicatedInterleavedMemAccesses) {
1412       LLVM_DEBUG(
1413           dbgs() << "LV: Invalidate candidate interleaved store group due "
1414                     "to gaps.\n");
1415       releaseGroup(Group);
1416       continue;
1417     }
1418 
    // Case 2: If the first and last members of the group don't wrap, this
    // implies that none of the pointers in the group wrap.
    // So we check only group member 0 (which is always guaranteed to exist)
    // and the last group member. Case 3 (scalar epilogue) is not relevant for
    // stores with gaps, which are implemented with a masked store (rather than
    // speculative access, as in loads).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, "first"))
1426       continue;
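    // Walk backwards from Factor - 1 to find the last member that actually
    // exists and check it for wrapping.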
1427     for (int Index = Group->getFactor() - 1; Index > 0; Index--)
1428       if (Group->getMember(Index)) {
        InvalidateGroupIfMemberMayWrap(Group, Index, "last");
1430         break;
1431       }
1432   }
1433 }
1434 
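// Release all interleave groups that require a scalar epilogue iteration
// (e.g. load groups with gaps). This is invoked when a scalar epilogue is not
// allowed (such as under optsize) and the gaps cannot be handled by masking.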
1435 void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
1436   // If no group had triggered the requirement to create an epilogue loop,
1437   // there is nothing to do.
1438   if (!requiresScalarEpilogue())
1439     return;
1440 
1441   bool ReleasedGroup = false;
1442   // Release groups requiring scalar epilogues. Note that this also removes them
1443   // from InterleaveGroups.
1444   for (auto *Group : make_early_inc_range(InterleaveGroups)) {
1445     if (!Group->requiresScalarEpilogue())
1446       continue;
1447     LLVM_DEBUG(
1448         dbgs()
1449         << "LV: Invalidate candidate interleaved group due to gaps that "
1450            "require a scalar epilogue (not allowed under optsize) and cannot "
1451            "be masked (not enabled). \n");
1452     releaseGroup(Group);
1453     ReleasedGroup = true;
1454   }
1455   assert(ReleasedGroup && "At least one group must be invalidated, as a "
1456                           "scalar epilogue was required");
1457   (void)ReleasedGroup;
1458   RequiresScalarEpilogue = false;
1459 }
1460 
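// The generic form of addMetadata is never meant to be called; only the
// specialization for Instruction below knows how to propagate metadata.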
1461 template <typename InstT>
1462 void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
1463   llvm_unreachable("addMetadata can only be used for Instruction");
1464 }
1465 
1466 namespace llvm {
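// Propagate the metadata that is common to all members of the interleave
// group to the newly created wide instruction.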
1467 template <>
1468 void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
1469   SmallVector<Value *, 4> VL;
1470   std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
1471                  [](std::pair<int, Instruction *> p) { return p.second; });
1472   propagateMetadata(NewInst, VL);
1473 }
1474 }
1475 
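// Build the VFABI mangled name for a TLI vector function: "_ZGV" followed by
// the LLVM ISA token, 'N' (no mask), the vector length ('x' for scalable VFs),
// one 'v' (vector argument) token per parameter, and
// "_<ScalarName>(<VectorName>)".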
1476 std::string VFABI::mangleTLIVectorName(StringRef VectorName,
1477                                        StringRef ScalarName, unsigned numArgs,
1478                                        ElementCount VF) {
1479   SmallString<256> Buffer;
1480   llvm::raw_svector_ostream Out(Buffer);
1481   Out << "_ZGV" << VFABI::_LLVM_ << "N";
1482   if (VF.isScalable())
1483     Out << 'x';
1484   else
1485     Out << VF.getFixedValue();
1486   for (unsigned I = 0; I < numArgs; ++I)
1487     Out << "v";
1488   Out << "_" << ScalarName << "(" << VectorName << ")";
1489   return std::string(Out.str());
1490 }
1491 
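// Populate VariantMappings with the unique, comma-separated VFABI mapping
// strings attached to CI through the VFABI::MappingsAttrName attribute. In
// asserts builds, each mapping is checked to demangle successfully and to name
// a vector function that is present in the module.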
1492 void VFABI::getVectorVariantNames(
1493     const CallInst &CI, SmallVectorImpl<std::string> &VariantMappings) {
1494   const StringRef S = CI.getFnAttr(VFABI::MappingsAttrName).getValueAsString();
1495   if (S.empty())
1496     return;
1497 
1498   SmallVector<StringRef, 8> ListAttr;
1499   S.split(ListAttr, ",");
1500 
1501   for (auto &S : SetVector<StringRef>(ListAttr.begin(), ListAttr.end())) {
1502 #ifndef NDEBUG
1503     LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << S << "'\n");
1504     Optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, *(CI.getModule()));
1505     assert(Info.hasValue() && "Invalid name for a VFABI variant.");
1506     assert(CI.getModule()->getFunction(Info.getValue().VectorName) &&
1507            "Vector function is missing.");
1508 #endif
1509     VariantMappings.push_back(std::string(S));
1510   }
1511 }
1512 
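// Validate the linear-step and global-predicate parameters: compile-time
// linear steps must be non-zero; runtime linear steps must refer to a distinct
// parameter that is marked uniform; and at most one global predicate parameter
// is allowed.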
1513 bool VFShape::hasValidParameterList() const {
1514   for (unsigned Pos = 0, NumParams = Parameters.size(); Pos < NumParams;
1515        ++Pos) {
1516     assert(Parameters[Pos].ParamPos == Pos && "Broken parameter list.");
1517 
1518     switch (Parameters[Pos].ParamKind) {
1519     default: // Nothing to check.
1520       break;
1521     case VFParamKind::OMP_Linear:
1522     case VFParamKind::OMP_LinearRef:
1523     case VFParamKind::OMP_LinearVal:
1524     case VFParamKind::OMP_LinearUVal:
      // Compile-time linear steps must be non-zero.
1526       if (Parameters[Pos].LinearStepOrPos == 0)
1527         return false;
1528       break;
1529     case VFParamKind::OMP_LinearPos:
1530     case VFParamKind::OMP_LinearRefPos:
1531     case VFParamKind::OMP_LinearValPos:
1532     case VFParamKind::OMP_LinearUValPos:
      // The runtime linear step must refer to some other
      // parameter in the signature.
1535       if (Parameters[Pos].LinearStepOrPos >= int(NumParams))
1536         return false;
1537       // The linear step parameter must be marked as uniform.
1538       if (Parameters[Parameters[Pos].LinearStepOrPos].ParamKind !=
1539           VFParamKind::OMP_Uniform)
1540         return false;
1541       // The linear step parameter can't point at itself.
1542       if (Parameters[Pos].LinearStepOrPos == int(Pos))
1543         return false;
1544       break;
1545     case VFParamKind::GlobalPredicate:
      // The global predicate must be unique; it can be placed anywhere in the
      // signature.
1548       for (unsigned NextPos = Pos + 1; NextPos < NumParams; ++NextPos)
1549         if (Parameters[NextPos].ParamKind == VFParamKind::GlobalPredicate)
1550           return false;
1551       break;
1552     }
1553   }
1554   return true;
1555 }
1556