//===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Return true if all of the intrinsic's arguments and return type are scalars
/// for the scalar form of the intrinsic, and vectors for the vector form of the
/// intrinsic (except operands that are marked as always being scalar by
/// hasVectorInstrinsicScalarOpd).
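/// For example (illustrative), llvm.fabs is trivially vectorizable: the
/// scalar form llvm.fabs.f32 takes and returns a float, and the corresponding
/// vector form llvm.fabs.v4f32 takes and returns a <4 x float>.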
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::bswap: // Begin integer bit-manipulation.
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
  case Intrinsic::sqrt: // Begin floating-point.
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::pow:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::powi:
  case Intrinsic::canonicalize:
    return true;
  default:
    return false;
  }
}

/// Identifies if the vector form of the intrinsic has a scalar operand.
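/// For example (illustrative), the second operand of llvm.ctlz and llvm.cttz
/// (the i1 is_zero_undef flag) and of llvm.powi (the i32 exponent) stays
/// scalar in the vector forms, so this returns true for ScalarOpdIdx == 1 for
/// those intrinsics.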
bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
                                        unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
    return (ScalarOpdIdx == 2);
  default:
    return false;
  }
}

/// Returns intrinsic ID for call.
/// For the input call instruction it finds the mapping intrinsic and returns
/// its ID; if no mapping is found, it returns not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::sideeffect)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
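/// For example (illustrative), for
///   %g = getelementptr i32, i32* %A, i64 %i
/// the operand to check is the only index, operand 1. A trailing zero index
/// is peeled off only when the type it indexes into has the same allocation
/// size as the GEP's result element type (e.g. a [1 x i32] wrapping an i32).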
unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getModule()->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  unsigned GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP we
    // can peel off the zero index.
    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}

/// If a value has only one user that is a CastInst, return it.
Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
  Value *UniqueCast = nullptr;
  for (User *U : Ptr->users()) {
    CastInst *CI = dyn_cast<CastInst>(U);
    if (CI && CI->getType() == Ty) {
      if (!UniqueCast)
        UniqueCast = CI;
      else
        return nullptr;
    }
  }
  return UniqueCast;
}

/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
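/// For example (illustrative), if the pointer is computed inside the loop as
///   %mul = mul i64 %i, %Stride
///   %gep = getelementptr i32, i32* %A, i64 %mul
/// and %Stride is loop-invariant, this returns %Stride.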
Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually the index
  // at this point) easier to analyze. If OrigPtr is equal to Ptr we are
  // analyzing the pointer, otherwise we are analyzing the index.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;

  V = S->getStepRecurrence(*SE);
  if (!V)
    return nullptr;

  // Strip off the size of access multiplication if we are still analyzing the
  // pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Strip off casts.
  Type *StrippedOffRecurrenceCast = nullptr;
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V)) {
    StrippedOffRecurrenceCast = C->getType();
    V = C->getOperand();
  }

  // Look for the loop invariant symbolic value.
  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
  if (!U)
    return nullptr;

  Value *Stride = U->getValue();
  if (!Lp->isLoopInvariant(Stride))
    return nullptr;

  // If we have stripped off the recurrence cast we have to make sure that we
  // return the value that is used in this loop so that we can replace it later.
  if (StrippedOffRecurrenceCast)
    Stride = getUniqueCastUse(Stride, Lp, StrippedOffRecurrenceCast);

  return Stride;
}

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
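/// For example (illustrative), asking for element 1 of
///   %v = insertelement <4 x i32> %w, i32 %x, i32 1
/// returns %x, so no extractelement needs to be generated.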
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  unsigned Width = VTy->getNumElements();
  if (EltNo >= Width)  // Out of range access.
    return UndefValue::get(VTy->getElementType());

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
    unsigned LHSWidth = SVI->getOperand(0)->getType()->getVectorNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return UndefValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // Otherwise, we don't know.
  return nullptr;
}

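/// Return the single mask element that every defined element of \p Mask
/// refers to, or -1 if the mask is not a splat. For example (illustrative),
/// the shuffle mask <3, undef, 3, 3> has splat index 3, while <0, 1, 0, 1>
/// has none.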
int llvm::getSplatIndex(ArrayRef<int> Mask) {
  int SplatIndex = -1;
  for (int M : Mask) {
    // Ignore invalid (undefined) mask elements.
    if (M < 0)
      continue;

    // There can be only 1 non-negative mask element value if this is a splat.
    if (SplatIndex != -1 && SplatIndex != M)
      return -1;

    // Initialize the splat index to the 1st non-negative mask element.
    SplatIndex = M;
  }
  assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
  return SplatIndex;
}

/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a scalar at element 0.
const llvm::Value *llvm::getSplatValue(const Value *V) {
  if (isa<VectorType>(V->getType()))
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue();

  // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
  Value *Splat;
  if (match(V, m_ShuffleVector(m_InsertElement(m_Value(), m_Value(Splat),
                                               m_ZeroInt()),
                               m_Value(), m_ZeroInt())))
    return Splat;

  return nullptr;
}

// This setting is based on its counterpart in value tracking, but it could be
// adjusted if needed.
const unsigned MaxDepth = 6;

bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
  assert(Depth <= MaxDepth && "Limit Search Depth");

  if (isa<VectorType>(V->getType())) {
    if (isa<UndefValue>(V))
      return true;
    // FIXME: We can allow undefs, but if Index was specified, we may want to
    //        check that the constant is defined at that index.
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue() != nullptr;
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
    // FIXME: We can safely allow undefs here. If Index was specified, we will
    //        check that the mask elt is defined at the required index.
    if (!Shuf->getMask()->getSplatValue())
      return false;

    // Match any index.
    if (Index == -1)
      return true;

    // Match a specific element. The mask should be defined at and match the
    // specified index.
    return Shuf->getMaskValue(Index) == Index;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return false;

  // If both operands of a binop are splats, the result is a splat.
  Value *X, *Y, *Z;
  if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);

  // If all operands of a select are splats, the result is a splat.
  if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
           isSplatValue(Z, Index, Depth);

  // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).

  return false;
}

MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
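  //
  // For example (illustrative, ignoring the target legality checks below): if
  // a chain of computation only feeds
  //   %t = trunc i64 %a to i8
  // then only the low 8 bits of the chain are demanded, and every instruction
  // in it is mapped to a minimum bit width of 8.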
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64-bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }
  // Early exit.
  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (Visited.count(Val))
      continue;
    Visited.insert(Val);

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
    uint64_t LeaderDemandedBits = 0;
    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
      LeaderDemandedBits |= DBits[*MI];

    uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) -
                     llvm::countLeadingZeros(LeaderDemandedBits);
    // Round up to a power of 2.
    if (!isPowerOf2_64((uint64_t)MinBW))
      MinBW = NextPowerOf2(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence
    // class.
    bool Abort = false;
    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
      if (isa<PHINode>(*MI) &&
          MinBW < (*MI)->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI) {
      if (!isa<Instruction>(*MI))
        continue;
      Type *Ty = (*MI)->getType();
      if (Roots.count(*MI))
        Ty = cast<Instruction>(*MI)->getOperand(0)->getType();
      if (MinBW < Ty->getScalarSizeInBits())
        MinBWs[cast<Instruction>(*MI)] = MinBW;
    }
  }

  return MinBWs;
}

/// Add all access groups in @p AccGroups to @p List.
template <typename ListT>
static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
  // Interpret an access group as a list containing itself.
  if (AccGroups->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
    List.insert(AccGroups);
    return;
  }

  for (auto &AccGroupListOp : AccGroups->operands()) {
    auto *Item = cast<MDNode>(AccGroupListOp.get());
    assert(isValidAsAccessGroup(Item) && "List item must be an access group");
    List.insert(Item);
  }
}

MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
  if (!AccGroups1)
    return AccGroups2;
  if (!AccGroups2)
    return AccGroups1;
  if (AccGroups1 == AccGroups2)
    return AccGroups1;

  SmallSetVector<Metadata *, 4> Union;
  addToAccessGroupList(Union, AccGroups1);
  addToAccessGroupList(Union, AccGroups2);

  if (Union.size() == 0)
    return nullptr;
  if (Union.size() == 1)
    return cast<MDNode>(Union.front());

  LLVMContext &Ctx = AccGroups1->getContext();
  return MDNode::get(Ctx, Union.getArrayRef());
}

MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
                                    const Instruction *Inst2) {
  bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
  bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();

  if (!MayAccessMem1 && !MayAccessMem2)
    return nullptr;
  if (!MayAccessMem1)
    return Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MayAccessMem2)
    return Inst1->getMetadata(LLVMContext::MD_access_group);

  MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
  MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MD1 || !MD2)
    return nullptr;
  if (MD1 == MD2)
    return MD1;

  // Use set for scalable 'contains' check.
  SmallPtrSet<Metadata *, 4> AccGroupSet2;
  addToAccessGroupList(AccGroupSet2, MD2);

  SmallVector<Metadata *, 4> Intersection;
  if (MD1->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
    if (AccGroupSet2.count(MD1))
      Intersection.push_back(MD1);
  } else {
    for (const MDOperand &Node : MD1->operands()) {
      auto *Item = cast<MDNode>(Node.get());
      assert(isValidAsAccessGroup(Item) && "List item must be an access group");
      if (AccGroupSet2.count(Item))
        Intersection.push_back(Item);
    }
  }

  if (Intersection.size() == 0)
    return nullptr;
  if (Intersection.size() == 1)
    return cast<MDNode>(Intersection.front());

  LLVMContext &Ctx = Inst1->getContext();
  return MDNode::get(Ctx, Intersection);
}

/// \returns \p Inst after propagating metadata from \p VL.
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                    LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
                    LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
                    LLVMContext::MD_access_group}) {
    MDNode *MD = I0->getMetadata(Kind);

    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);
      switch (Kind) {
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_invariant_load:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_access_group:
        MD = intersectAccessGroups(Inst, IJ);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}

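/// For example (illustrative), for an interleave group with factor 3 whose
/// members are present at indices 0 and 2 only, and VF = 2, the resulting
/// gap mask is <i1 1, i1 0, i1 1, i1 1, i1 0, i1 1>.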
Constant *
llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
                           const InterleaveGroup<Instruction> &Group) {
  // All 1's means mask is not needed.
  if (Group.getNumMembers() == Group.getFactor())
    return nullptr;

  // TODO: support reversed access.
  assert(!Group.isReverse() && "Reversed group not supported.");

  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < Group.getFactor(); ++j) {
      unsigned HasMember = Group.getMember(j) ? 1 : 0;
      Mask.push_back(Builder.getInt1(HasMember));
    }

  return ConstantVector::get(Mask);
}

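/// For example (illustrative), createReplicatedMask(Builder, 3, 2) produces
/// the shuffle mask <0, 0, 0, 1, 1, 1>, replicating each of the 2 source
/// lanes 3 times.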
Constant *llvm::createReplicatedMask(IRBuilderBase &Builder,
                                     unsigned ReplicationFactor, unsigned VF) {
  SmallVector<Constant *, 16> MaskVec;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < ReplicationFactor; j++)
      MaskVec.push_back(Builder.getInt32(i));

  return ConstantVector::get(MaskVec);
}

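/// For example (illustrative), createInterleaveMask(Builder, 4, 2) produces
/// the shuffle mask <0, 4, 1, 5, 2, 6, 3, 7>, interleaving the lanes of two
/// 4-element vectors.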
Constant *llvm::createInterleaveMask(IRBuilderBase &Builder, unsigned VF,
                                     unsigned NumVecs) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVecs; j++)
      Mask.push_back(Builder.getInt32(j * VF + i));

  return ConstantVector::get(Mask);
}

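/// For example (illustrative), createStrideMask(Builder, 0, 2, 4) produces
/// the shuffle mask <0, 2, 4, 6>, selecting every second lane starting at 0.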
Constant *llvm::createStrideMask(IRBuilderBase &Builder, unsigned Start,
                                 unsigned Stride, unsigned VF) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Builder.getInt32(Start + i * Stride));

  return ConstantVector::get(Mask);
}

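/// For example (illustrative), createSequentialMask(Builder, 0, 4, 2)
/// produces the shuffle mask <0, 1, 2, 3, undef, undef>.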
Constant *llvm::createSequentialMask(IRBuilderBase &Builder, unsigned Start,
                                     unsigned NumInts, unsigned NumUndefs) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < NumInts; i++)
    Mask.push_back(Builder.getInt32(Start + i));

  Constant *Undef = UndefValue::get(Builder.getInt32Ty());
  for (unsigned i = 0; i < NumUndefs; i++)
    Mask.push_back(Undef);

  return ConstantVector::get(Mask);
}

/// A helper function for concatenating vectors. This function concatenates two
/// vectors having the same element type. If the second vector has fewer
/// elements than the first, it is padded with undefs.
static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = VecTy1->getNumElements();
  unsigned NumElts2 = VecTy2->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Unexpected: the first vector has fewer elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    Constant *ExtMask =
        createSequentialMask(Builder, 0, NumElts2, NumElts1 - NumElts2);
    V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask);
  }

  Constant *Mask = createSequentialMask(Builder, 0, NumElts1 + NumElts2, 0);
  return Builder.CreateShuffleVector(V1, V2, Mask);
}

Value *llvm::concatenateVectors(IRBuilderBase &Builder,
                                ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}

bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
    return true;
  for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
       ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskIsAllOneOrUndef(Value *Mask) {
  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
       ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

/// TODO: This is a lot like known bits, but for
/// vectors.  Is there something we can common this with?
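/// For example (illustrative), for the constant mask
///   <i1 1, i1 0, i1 undef, i1 1>
/// only lane 1 is known to be masked off, so the returned APInt is 0b1101.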
APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
  const unsigned VWidth = cast<VectorType>(Mask->getType())->getNumElements();
  APInt DemandedElts = APInt::getAllOnesValue(VWidth);
  if (auto *CV = dyn_cast<ConstantVector>(Mask))
    for (unsigned i = 0; i < VWidth; i++)
      if (CV->getAggregateElement(i)->isNullValue())
        DemandedElts.clearBit(i);
  return DemandedElts;
}

bool InterleavedAccessInfo::isStrided(int Stride) {
  unsigned Factor = std::abs(Stride);
  return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {
  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!LI && !SI)
        continue;

      Value *Ptr = getLoadStorePointerOperand(&I);
      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be overly
      // conservative. For full groups, wrapping should be ok since if we would
      // wrap around the address space we would do a memory access at nullptr
      // even without the transformation. The wrapping checks are therefore
      // deferred until after we've formed the interleaved groups.
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
                                    /*Assume=*/true, /*ShouldCheckWrap=*/false);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = cast<PointerType>(Ptr->getType());
      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());

      // An alignment of 0 means target ABI alignment.
      MaybeAlign Alignment = MaybeAlign(getLoadStoreAlignment(&I));
      if (!Alignment)
        Alignment = Align(DL.getABITypeAlignment(PtrTy->getElementType()));

      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, *Alignment);
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];      // (1)
//                                A[i] = b;      // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
                                 bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const ValueToValueMap &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup<Instruction> *Group = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) ||
         EnablePredicatedInterleavedMemAccesses)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a;  // (1)
      //                    | A[i-1] = b;  // (2) |
      //                      A[i-3] = c;  // (3)
      //                      A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup<Instruction> *StoreGroup = getInterleaveGroup(A);

          LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
                               "dependence between "
                            << *A << " and " << *B << '\n');

          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive with
      // mayWriteToMemory() in the case of atomic loads. We shouldn't see those
      // here; canVectorizeMemory() should have returned false - unless we were
      // only asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same
      // predicate, and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Alignment)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (auto *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor()) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
    }
  // Remove interleaved groups with gaps (currently only loads) whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer doesn't
  // wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. When we'll change to use Assume=true
  // we'll only need at most one runtime check per interleaved group.
  for (auto *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; If the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
    if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                      /*ShouldCheckWrap=*/true)) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved group due to "
                    "first group member potentially pointer-wrapping.\n");
      releaseGroup(Group);
      continue;
    }
    Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
    if (LastMember) {
      Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true)) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "last group member potentially pointer-wrapping.\n");
        releaseGroup(Group);
      }
    } else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }
}

void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
  // If no group had triggered the requirement to create an epilogue loop,
  // there is nothing to do.
  if (!requiresScalarEpilogue())
    return;

  // Avoid releasing a Group twice.
  SmallPtrSet<InterleaveGroup<Instruction> *, 4> DelSet;
  for (auto &I : InterleaveGroupMap) {
    InterleaveGroup<Instruction> *Group = I.second;
    if (Group->requiresScalarEpilogue())
      DelSet.insert(Group);
  }
  for (auto *Ptr : DelSet) {
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate candidate interleaved group due to gaps that "
           "require a scalar epilogue (not allowed under optsize) and cannot "
           "be masked (not enabled).\n");
    releaseGroup(Ptr);
  }

  RequiresScalarEpilogue = false;
}

template <typename InstT>
void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
  llvm_unreachable("addMetadata can only be used for Instruction");
}

namespace llvm {
template <>
void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
  SmallVector<Value *, 4> VL;
  std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                 [](std::pair<int, Instruction *> p) { return p.second; });
  propagateMetadata(NewInst, VL);
}
}

void VFABI::getVectorVariantNames(
    const CallInst &CI, SmallVectorImpl<std::string> &VariantMappings) {
  const StringRef S =
      CI.getAttribute(AttributeList::FunctionIndex, VFABI::MappingsAttrName)
          .getValueAsString();
  if (S.empty())
    return;

  SmallVector<StringRef, 8> ListAttr;
  S.split(ListAttr, ",");

  for (auto &S : SetVector<StringRef>(ListAttr.begin(), ListAttr.end())) {
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << S << "'\n");
    Optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, *(CI.getModule()));
    assert(Info.hasValue() && "Invalid name for a VFABI variant.");
    assert(CI.getModule()->getFunction(Info.getValue().VectorName) &&
           "Vector function is missing.");
#endif
    VariantMappings.push_back(std::string(S));
  }
}

bool VFShape::hasValidParameterList() const {
  for (unsigned Pos = 0, NumParams = Parameters.size(); Pos < NumParams;
       ++Pos) {
    assert(Parameters[Pos].ParamPos == Pos && "Broken parameter list.");

    switch (Parameters[Pos].ParamKind) {
    default: // Nothing to check.
      break;
    case VFParamKind::OMP_Linear:
    case VFParamKind::OMP_LinearRef:
    case VFParamKind::OMP_LinearVal:
    case VFParamKind::OMP_LinearUVal:
      // Compile time linear steps must be non-zero.
      if (Parameters[Pos].LinearStepOrPos == 0)
        return false;
      break;
    case VFParamKind::OMP_LinearPos:
    case VFParamKind::OMP_LinearRefPos:
    case VFParamKind::OMP_LinearValPos:
    case VFParamKind::OMP_LinearUValPos:
      // The runtime linear step must be referring to some other
      // parameters in the signature.
      if (Parameters[Pos].LinearStepOrPos >= int(NumParams))
        return false;
      // The linear step parameter must be marked as uniform.
      if (Parameters[Parameters[Pos].LinearStepOrPos].ParamKind !=
          VFParamKind::OMP_Uniform)
        return false;
      // The linear step parameter can't point at itself.
      if (Parameters[Pos].LinearStepOrPos == int(Pos))
        return false;
      break;
    case VFParamKind::GlobalPredicate:
      // The global predicate must be unique. It can be placed anywhere in the
      // signature.
      for (unsigned NextPos = Pos + 1; NextPos < NumParams; ++NextPos)
        if (Parameters[NextPos].ParamKind == VFParamKind::GlobalPredicate)
          return false;
      break;
    }
  }
  return true;
}