//===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Return true if all of the intrinsic's arguments and return type are scalars
/// for the scalar form of the intrinsic, and vectors for the vector form of the
/// intrinsic (except operands that are marked as always being scalar by
/// isVectorIntrinsicWithScalarOpAtArg).
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::abs:   // Begin integer bit-manipulation.
  case Intrinsic::bswap:
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
  case Intrinsic::sqrt: // Begin floating-point.
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::pow:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::powi:
  case Intrinsic::canonicalize:
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
    return true;
  default:
    return false;
  }
}

/// Identifies if the vector form of the intrinsic has a scalar operand.
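/// For example, the vector form of llvm.powi keeps its exponent (operand 1)
/// scalar, and llvm.ctlz/llvm.cttz keep their boolean zero-is-poison flag
/// (operand 1) scalar, as reflected in the switch below.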
bool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
                                              unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::abs:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
    return (ScalarOpdIdx == 2);
  default:
    return false;
  }
}

bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
                                                  unsigned OpdIdx) {
  switch (ID) {
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
    return OpdIdx == 0;
  case Intrinsic::powi:
    return OpdIdx == 1;
  default:
    return false;
  }
}

/// Returns the intrinsic ID for a call.
/// For the input call instruction it finds the mapping intrinsic and returns
/// its ID; if no mapping is found, it returns not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::experimental_noalias_scope_decl ||
      ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
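/// For instance (an illustrative sketch), in a GEP such as
///   getelementptr [1 x i32], ptr %p, i64 %i, i64 0
/// the trailing zero index can be peeled off because stepping the [1 x i32]
/// index moves the pointer by the same amount as the i32 result type, so the
/// operand to check is %i.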
unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getModule()->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP we
    // can peel off the zero index.
    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}

/// If a value has only one user that is a CastInst, return it.
Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
  Value *UniqueCast = nullptr;
  for (User *U : Ptr->users()) {
    CastInst *CI = dyn_cast<CastInst>(U);
    if (CI && CI->getType() == Ty) {
      if (!UniqueCast)
        UniqueCast = CI;
      else
        return nullptr;
    }
  }
  return UniqueCast;
}

/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually the index at
  // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
  // the pointer, otherwise we are analyzing the index.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;

  V = S->getStepRecurrence(*SE);
  if (!V)
    return nullptr;

  // Strip off the size of access multiplication if we are still analyzing the
  // pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Strip off casts.
  Type *StrippedOffRecurrenceCast = nullptr;
  if (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V)) {
    StrippedOffRecurrenceCast = C->getType();
    V = C->getOperand();
  }

  // Look for the loop invariant symbolic value.
  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
  if (!U)
    return nullptr;

  Value *Stride = U->getValue();
  if (!Lp->isLoopInvariant(Stride))
    return nullptr;

  // If we have stripped off the recurrence cast we have to make sure that we
  // return the value that is used in this loop so that we can replace it
  // later.
  if (StrippedOffRecurrenceCast)
    Stride = getUniqueCastUse(Stride, Lp, StrippedOffRecurrenceCast);

  return Stride;
}

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
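/// For example, asking for element 1 of
///   %v = insertelement <4 x i32> %w, i32 %x, i32 1
/// returns %x without needing an extractelement.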
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  // For a fixed-length vector, return undef for an out-of-range access.
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
    unsigned Width = FVTy->getNumElements();
    if (EltNo >= Width)
      return UndefValue::get(FVTy->getElementType());
  }

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Guard against infinite loop on malformed, unreachable IR.
    if (III == III->getOperand(0))
      return nullptr;

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  // Restrict the following transformation to fixed-length vectors.
  if (SVI && isa<FixedVectorType>(SVI->getType())) {
    unsigned LHSWidth =
        cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return UndefValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // If the vector is a splat then we can trivially find the scalar element.
  if (isa<ScalableVectorType>(VTy))
    if (Value *Splat = getSplatValue(V))
      if (EltNo < VTy->getElementCount().getKnownMinValue())
        return Splat;

  // Otherwise, we don't know.
  return nullptr;
}

int llvm::getSplatIndex(ArrayRef<int> Mask) {
  int SplatIndex = -1;
  for (int M : Mask) {
    // Ignore invalid (undefined) mask elements.
    if (M < 0)
      continue;

    // There can be only 1 non-negative mask element value if this is a splat.
    if (SplatIndex != -1 && SplatIndex != M)
      return -1;

    // Initialize the splat index to the 1st non-negative mask element.
    SplatIndex = M;
  }
  assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
  return SplatIndex;
}
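
// For example, getSplatIndex({2, -1, 2, 2}) returns 2, while
// getSplatIndex({0, 1}) returns -1 because two different source elements are
// referenced.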

/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a scalar at element 0.
Value *llvm::getSplatValue(const Value *V) {
  if (isa<VectorType>(V->getType()))
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue();

  // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
  Value *Splat;
  if (match(V,
            m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
                      m_Value(), m_ZeroMask())))
    return Splat;

  return nullptr;
}

bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  if (isa<VectorType>(V->getType())) {
    if (isa<UndefValue>(V))
      return true;
    // FIXME: We can allow undefs, but if Index was specified, we may want to
    // check that the constant is defined at that index.
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue() != nullptr;
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
    // FIXME: We can safely allow undefs here. If Index was specified, we will
    // check that the mask elt is defined at the required index.
    if (!is_splat(Shuf->getShuffleMask()))
      return false;

    // Match any index.
    if (Index == -1)
      return true;

    // Match a specific element. The mask should be defined at and match the
    // specified index.
    return Shuf->getMaskValue(Index) == Index;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxAnalysisRecursionDepth)
    return false;

  // If both operands of a binop are splats, the result is a splat.
  Value *X, *Y, *Z;
  if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);

  // If all operands of a select are splats, the result is a splat.
  if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
           isSplatValue(Z, Index, Depth);

  // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).

  return false;
}

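/// An illustrative example: narrowing the mask <1, 0> with Scale = 2 yields
/// <2, 3, 0, 1>; each wide-element index becomes Scale consecutive indices of
/// the narrower element type.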
void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                 SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return;
  }

  ScaledMask.clear();
  for (int MaskElt : Mask) {
    if (MaskElt >= 0) {
      assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
             "Overflowed 32-bits");
    }
    for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
  }
}

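/// The inverse direction, sketched by example: widening <2, 3, 0, 1> with
/// Scale = 2 yields <1, 0>, while <0, 0, 1, 1> fails because the elements of
/// the first slice are not consecutive.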
bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // We must map the original elements down evenly to a type with fewer
  // elements.
  int NumElts = Mask.size();
  if (NumElts % Scale != 0)
    return false;

  ScaledMask.clear();
  ScaledMask.reserve(NumElts / Scale);

  // Step through the input mask by splitting into Scale-sized slices.
  do {
    ArrayRef<int> MaskSlice = Mask.take_front(Scale);
    assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");

    // The first element of the slice determines how we evaluate this slice.
    int SliceFront = MaskSlice.front();
    if (SliceFront < 0) {
      // Negative values (undef or other "sentinel" values) must be equal
      // across the entire slice.
      if (!is_splat(MaskSlice))
        return false;
      ScaledMask.push_back(SliceFront);
    } else {
      // A positive mask element must be cleanly divisible.
      if (SliceFront % Scale != 0)
        return false;
      // Elements of the slice must be consecutive.
      for (int i = 1; i < Scale; ++i)
        if (MaskSlice[i] != SliceFront + i)
          return false;
      ScaledMask.push_back(SliceFront / Scale);
    }
    Mask = Mask.drop_front(Scale);
  } while (!Mask.empty());

  assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");

  // All elements of the original mask can be scaled down to map to the
  // elements of a mask with wider elements.
  return true;
}

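/// A brief sketch of the contract (inferred from the code below): the shuffle
/// described by Mask is split across NumOfSrcRegs source and NumOfDestRegs
/// destination registers, and for each of the first NumOfUsedRegs destination
/// registers, NoInputAction, SingleInputAction, or ManyInputsAction is invoked
/// depending on how many source registers feed that destination register
/// (ManyInputsAction may run repeatedly as pairs of sources are combined).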
void llvm::processShuffleMasks(
    ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
    unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction) {
  SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
  // Try to perform better estimation of the permutation.
  // 1. Split the source/destination vectors into real registers.
  // 2. Do the mask analysis to identify which real registers are
  //    permuted.
  int Sz = Mask.size();
  unsigned SzDest = Sz / NumOfDestRegs;
  unsigned SzSrc = Sz / NumOfSrcRegs;
  for (unsigned I = 0; I < NumOfDestRegs; ++I) {
    auto &RegMasks = Res[I];
    RegMasks.assign(NumOfSrcRegs, {});
    // Determine, for each value in this dest register, which source register
    // it comes from.
    for (unsigned K = 0; K < SzDest; ++K) {
      int Idx = I * SzDest + K;
      if (Idx == Sz)
        break;
      if (Mask[Idx] >= Sz || Mask[Idx] == UndefMaskElem)
        continue;
      int SrcRegIdx = Mask[Idx] / SzSrc;
      // Add a cost of PermuteTwoSrc for each new source register permute, if
      // we have more than one source register.
      if (RegMasks[SrcRegIdx].empty())
        RegMasks[SrcRegIdx].assign(SzDest, UndefMaskElem);
      RegMasks[SrcRegIdx][K] = Mask[Idx] % SzSrc;
    }
  }
  // Process split mask.
  for (unsigned I = 0; I < NumOfUsedRegs; ++I) {
    auto &Dest = Res[I];
    int NumSrcRegs =
        count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
    switch (NumSrcRegs) {
    case 0:
      // No input vectors were used!
      NoInputAction();
      break;
    case 1: {
      // Find the single non-empty source mask.
      auto *It =
          find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
      unsigned SrcReg = std::distance(Dest.begin(), It);
      SingleInputAction(*It, SrcReg, I);
      break;
    }
    default: {
      // The first mask is a permutation of a single register. Since we have
      // >= 2 input registers to shuffle, we merge the masks for the first two
      // registers and generate a shuffle of 2 registers rather than reordering
      // the first register and then shuffling it with the second register.
      // Next, generate the shuffles of the resulting register + the remaining
      // registers from the list.
      auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
                               ArrayRef<int> SecondMask) {
        for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
          if (SecondMask[Idx] != UndefMaskElem) {
            assert(FirstMask[Idx] == UndefMaskElem &&
                   "Expected undefined mask element.");
            FirstMask[Idx] = SecondMask[Idx] + VF;
          }
        }
      };
      auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
        for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
          if (Mask[Idx] != UndefMaskElem)
            Mask[Idx] = Idx;
        }
      };
      int SecondIdx;
      do {
        int FirstIdx = -1;
        SecondIdx = -1;
        MutableArrayRef<int> FirstMask, SecondMask;
        for (unsigned I = 0; I < NumOfDestRegs; ++I) {
          SmallVectorImpl<int> &RegMask = Dest[I];
          if (RegMask.empty())
            continue;

          if (FirstIdx == SecondIdx) {
            FirstIdx = I;
            FirstMask = RegMask;
            continue;
          }
          SecondIdx = I;
          SecondMask = RegMask;
          CombineMasks(FirstMask, SecondMask);
          ManyInputsAction(FirstMask, FirstIdx, SecondIdx);
          NormalizeMask(FirstMask);
          RegMask.clear();
          SecondMask = FirstMask;
          SecondIdx = FirstIdx;
        }
        if (FirstIdx != SecondIdx && SecondIdx >= 0) {
          CombineMasks(SecondMask, FirstMask);
          ManyInputsAction(SecondMask, SecondIdx, FirstIdx);
          Dest[FirstIdx].clear();
          NormalizeMask(SecondMask);
        }
      } while (SecondIdx >= 0);
      break;
    }
    }
  }
}

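/// Compute, for each chain of connected values ending in a trunc or icmp, the
/// minimum bit width at which the chain can be evaluated without inserting
/// extra casts (a summary of the code below); e.g., values feeding only a
/// trunc to i8 can typically be computed in 8 bits.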
MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64-bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }
  // Early exit.
  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (!Visited.insert(Val).second)
      continue;

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
    uint64_t LeaderDemandedBits = 0;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      LeaderDemandedBits |= DBits[M];

    uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) -
                     llvm::countLeadingZeros(LeaderDemandedBits);
    // Round up to a power of 2.
    if (!isPowerOf2_64((uint64_t)MinBW))
      MinBW = NextPowerOf2(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence
    // class.
    bool Abort = false;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) {
      if (!isa<Instruction>(M))
        continue;
      Type *Ty = M->getType();
      if (Roots.count(M))
        Ty = cast<Instruction>(M)->getOperand(0)->getType();
      if (MinBW < Ty->getScalarSizeInBits())
        MinBWs[cast<Instruction>(M)] = MinBW;
    }
  }

  return MinBWs;
}

/// Add all access groups in @p AccGroups to @p List.
template <typename ListT>
static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
  // Interpret an access group as a list containing itself.
  if (AccGroups->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
    List.insert(AccGroups);
    return;
  }

  for (const auto &AccGroupListOp : AccGroups->operands()) {
    auto *Item = cast<MDNode>(AccGroupListOp.get());
    assert(isValidAsAccessGroup(Item) && "List item must be an access group");
    List.insert(Item);
  }
}

MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
  if (!AccGroups1)
    return AccGroups2;
  if (!AccGroups2)
    return AccGroups1;
  if (AccGroups1 == AccGroups2)
    return AccGroups1;

  SmallSetVector<Metadata *, 4> Union;
  addToAccessGroupList(Union, AccGroups1);
  addToAccessGroupList(Union, AccGroups2);

  if (Union.size() == 0)
    return nullptr;
  if (Union.size() == 1)
    return cast<MDNode>(Union.front());

  LLVMContext &Ctx = AccGroups1->getContext();
  return MDNode::get(Ctx, Union.getArrayRef());
}

MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
                                    const Instruction *Inst2) {
  bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
  bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();

  if (!MayAccessMem1 && !MayAccessMem2)
    return nullptr;
  if (!MayAccessMem1)
    return Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MayAccessMem2)
    return Inst1->getMetadata(LLVMContext::MD_access_group);

  MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
  MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MD1 || !MD2)
    return nullptr;
  if (MD1 == MD2)
    return MD1;

  // Use set for scalable 'contains' check.
  SmallPtrSet<Metadata *, 4> AccGroupSet2;
  addToAccessGroupList(AccGroupSet2, MD2);

  SmallVector<Metadata *, 4> Intersection;
  if (MD1->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
    if (AccGroupSet2.count(MD1))
      Intersection.push_back(MD1);
  } else {
    for (const MDOperand &Node : MD1->operands()) {
      auto *Item = cast<MDNode>(Node.get());
      assert(isValidAsAccessGroup(Item) && "List item must be an access group");
      if (AccGroupSet2.count(Item))
        Intersection.push_back(Item);
    }
  }

  if (Intersection.size() == 0)
    return nullptr;
  if (Intersection.size() == 1)
    return cast<MDNode>(Intersection.front());

  LLVMContext &Ctx = Inst1->getContext();
  return MDNode::get(Ctx, Intersection);
}

/// \returns \p Inst after propagating metadata from \p VL.
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  if (VL.empty())
    return Inst;
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                    LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
                    LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
                    LLVMContext::MD_access_group}) {
    MDNode *MD = I0->getMetadata(Kind);

    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);
      switch (Kind) {
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_invariant_load:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_access_group:
        MD = intersectAccessGroups(Inst, IJ);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}

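/// For example (illustrative), a group with factor 4, VF = 2, and members
/// present only at indices 0 and 2 yields the mask
/// <i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0>.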
Constant *
llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
                           const InterleaveGroup<Instruction> &Group) {
  // All 1's means mask is not needed.
  if (Group.getNumMembers() == Group.getFactor())
    return nullptr;

  // TODO: support reversed access.
  assert(!Group.isReverse() && "Reversed group not supported.");

  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < Group.getFactor(); ++j) {
      unsigned HasMember = Group.getMember(j) ? 1 : 0;
      Mask.push_back(Builder.getInt1(HasMember));
    }

  return ConstantVector::get(Mask);
}

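/// For example, createReplicatedMask(/*ReplicationFactor=*/3, /*VF=*/2)
/// returns <0, 0, 0, 1, 1, 1>.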
llvm::SmallVector<int, 16>
llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) {
  SmallVector<int, 16> MaskVec;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < ReplicationFactor; j++)
      MaskVec.push_back(i);

  return MaskVec;
}

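/// For example, createInterleaveMask(/*VF=*/4, /*NumVecs=*/2) returns
/// <0, 4, 1, 5, 2, 6, 3, 7>, interleaving the lanes of two vectors.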
llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
                                                      unsigned NumVecs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVecs; j++)
      Mask.push_back(j * VF + i);

  return Mask;
}

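/// For example, createStrideMask(/*Start=*/0, /*Stride=*/2, /*VF=*/4) returns
/// <0, 2, 4, 6>, selecting every other element.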
llvm::SmallVector<int, 16>
llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Start + i * Stride);

  return Mask;
}

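/// For example, createSequentialMask(/*Start=*/0, /*NumInts=*/4,
/// /*NumUndefs=*/4) returns <0, 1, 2, 3, -1, -1, -1, -1>, where -1 denotes an
/// undef mask element.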
llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
                                                      unsigned NumInts,
                                                      unsigned NumUndefs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < NumInts; i++)
    Mask.push_back(Start + i);

  for (unsigned i = 0; i < NumUndefs; i++)
    Mask.push_back(-1);

  return Mask;
}

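/// For example, createUnaryMask(<0, 5, 2, 7>, /*NumElts=*/4) returns
/// <0, 1, 2, 3>: elements 5 and 7 of operand 1 are remapped to elements 1 and
/// 3 of operand 0.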
llvm::SmallVector<int, 16> llvm::createUnaryMask(ArrayRef<int> Mask,
                                                 unsigned NumElts) {
  // Avoid casts in the loop and make sure we have a reasonable number.
  int NumEltsSigned = NumElts;
  assert(NumEltsSigned > 0 && "Expected smaller or non-zero element count");

  // If the mask chooses an element from operand 1, reduce it to choose from
  // the corresponding element of operand 0. Undef mask elements are unchanged.
  SmallVector<int, 16> UnaryMask;
  for (int MaskElt : Mask) {
    assert((MaskElt < NumEltsSigned * 2) && "Expected valid shuffle mask");
    int UnaryElt = MaskElt >= NumEltsSigned ? MaskElt - NumEltsSigned : MaskElt;
    UnaryMask.push_back(UnaryElt);
  }
  return UnaryMask;
}

/// A helper function for concatenating vectors. This function concatenates two
/// vectors having the same element type. If the second vector has fewer
/// elements than the first, it is padded with undefs.
static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
  unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Unexpected: the first vector has fewer elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    V2 = Builder.CreateShuffleVector(
        V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
  }

  return Builder.CreateShuffleVector(
      V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0));
}

Value *llvm::concatenateVectors(IRBuilderBase &Builder,
                                ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}

bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskIsAllOneOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

/// TODO: This is a lot like known bits, but for vectors. Is there something
/// we can common this with?
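/// For example, the mask <i1 1, i1 0, i1 1, i1 0> yields demanded elements
/// 0b0101 (lanes 0 and 2 may be accessed).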
APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
  assert(isa<FixedVectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a fixed width vector of i1");

  const unsigned VWidth =
      cast<FixedVectorType>(Mask->getType())->getNumElements();
  APInt DemandedElts = APInt::getAllOnes(VWidth);
  if (auto *CV = dyn_cast<ConstantVector>(Mask))
    for (unsigned i = 0; i < VWidth; i++)
      if (CV->getAggregateElement(i)->isNullValue())
        DemandedElts.clearBit(i);
  return DemandedElts;
}

bool InterleavedAccessInfo::isStrided(int Stride) {
  unsigned Factor = std::abs(Stride);
  return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {
  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;
      Type *ElementTy = getLoadStoreType(&I);

      // Currently, codegen doesn't support cases where the type size doesn't
      // match the alloc size. Skip them for now.
      uint64_t Size = DL.getTypeAllocSize(ElementTy);
      if (Size * 8 != DL.getTypeSizeInBits(ElementTy))
        continue;

      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be overly
      // conservative. For full groups, wrapping should be ok since if we would
      // wrap around the address space we would do a memory access at nullptr
      // even without the transformation. The wrapping checks are therefore
      // deferred until after we've formed the interleaved groups.
      int64_t Stride = getPtrStride(PSE, ElementTy, Ptr, TheLoop, Strides,
                                    /*Assume=*/true, /*ShouldCheckWrap=*/false);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
                                              getLoadStoreAlignment(&I));
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];  // (1)
//                                A[i] = b;  // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;     // (1)
//                                A[i] = b;     // (2)
//                                A[i + 1] = c; // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const ValueToValueMap &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup<Instruction> *Group = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) ||
         EnablePredicatedInterleavedMemAccesses)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a; // (1)
      //                    | A[i-1] = b; // (2) |
      //                      A[i-3] = c; // (3)
      //                      A[i]   = d; // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup<Instruction> *StoreGroup = getInterleaveGroup(A);

          LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
                               "dependence between "
                            << *A << " and " << *B << '\n');

          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive to
      // mayWriteToMemory in the case of atomic loads. We shouldn't see those
      // here, canVectorizeMemory() should have returned false - except for the
      // case we asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory object of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same
      // predicate, and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Alignment)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group,
                                            int Index,
                                            std::string FirstOrLast) -> bool {
    Instruction *Member = Group->getMember(Index);
    assert(Member && "Group member does not exist");
    Value *MemberPtr = getLoadStorePointerOperand(Member);
    Type *AccessTy = getLoadStoreType(Member);
    if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
                     /*Assume=*/false, /*ShouldCheckWrap=*/true))
      return false;
    LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      << FirstOrLast
                      << " group member potentially pointer-wrapping.\n");
    releaseGroup(Group);
    return true;
  };

  // Remove interleaved groups with gaps whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer doesn't
  // wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. When we'll change to use Assume=true
  // we'll only need at most one runtime check per interleaved group.
  for (auto *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; If the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
      continue;
    if (Group->getMember(Group->getFactor() - 1))
      InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1,
                                     std::string("last"));
    else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }

  for (auto *Group : StoreGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide store would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Interleave-store-group with gaps is implemented using masked wide store.
    // Remove interleaved store groups with gaps if
    // masked-interleaved-accesses are not enabled by the target.
    if (!EnablePredicatedInterleavedMemAccesses) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
      continue;
    }

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and the last group member. Case 3 (scalar epilog) is not relevant for
    // stores with gaps, which are implemented with masked-store (rather than
    // speculative access, as in loads).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
      continue;
    for (int Index = Group->getFactor() - 1; Index > 0; Index--)
      if (Group->getMember(Index)) {
        InvalidateGroupIfMemberMayWrap(Group, Index, std::string("last"));
        break;
      }
  }
}

void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
  // If no group had triggered the requirement to create an epilogue loop,
  // there is nothing to do.
  if (!requiresScalarEpilogue())
    return;

  bool ReleasedGroup = false;
  // Release groups requiring scalar epilogues. Note that this also removes them
  // from InterleaveGroups.
  for (auto *Group : make_early_inc_range(InterleaveGroups)) {
    if (!Group->requiresScalarEpilogue())
      continue;
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate candidate interleaved group due to gaps that "
           "require a scalar epilogue (not allowed under optsize) and cannot "
           "be masked (not enabled).\n");
    releaseGroup(Group);
    ReleasedGroup = true;
  }
  assert(ReleasedGroup && "At least one group must be invalidated, as a "
                          "scalar epilogue was required");
  (void)ReleasedGroup;
  RequiresScalarEpilogue = false;
}

template <typename InstT>
void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
  llvm_unreachable("addMetadata can only be used for Instruction");
}

namespace llvm {
template <>
void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
  SmallVector<Value *, 4> VL;
  std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                 [](std::pair<int, Instruction *> p) { return p.second; });
  propagateMetadata(NewInst, VL);
}
}

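/// For example (illustrative), mangleTLIVectorName("vsinf", "sinf", 1,
/// ElementCount::getFixed(4)) produces "_ZGV_LLVM_N4v_sinf(vsinf)": a 4-lane,
/// unpredicated variant with one vector ('v') parameter.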
std::string VFABI::mangleTLIVectorName(StringRef VectorName,
                                       StringRef ScalarName, unsigned numArgs,
                                       ElementCount VF) {
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << "_ZGV" << VFABI::_LLVM_ << "N";
  if (VF.isScalable())
    Out << 'x';
  else
    Out << VF.getFixedValue();
  for (unsigned I = 0; I < numArgs; ++I)
    Out << "v";
  Out << "_" << ScalarName << "(" << VectorName << ")";
  return std::string(Out.str());
}

void VFABI::getVectorVariantNames(
    const CallInst &CI, SmallVectorImpl<std::string> &VariantMappings) {
  const StringRef S = CI.getFnAttr(VFABI::MappingsAttrName).getValueAsString();
  if (S.empty())
    return;

  SmallVector<StringRef, 8> ListAttr;
  S.split(ListAttr, ",");

  for (const auto &S : SetVector<StringRef>(ListAttr.begin(), ListAttr.end())) {
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << S << "'\n");
    Optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, *(CI.getModule()));
    assert(Info && "Invalid name for a VFABI variant.");
    assert(CI.getModule()->getFunction(Info.value().VectorName) &&
           "Vector function is missing.");
#endif
    VariantMappings.push_back(std::string(S));
  }
}

bool VFShape::hasValidParameterList() const {
  for (unsigned Pos = 0, NumParams = Parameters.size(); Pos < NumParams;
       ++Pos) {
    assert(Parameters[Pos].ParamPos == Pos && "Broken parameter list.");

    switch (Parameters[Pos].ParamKind) {
    default: // Nothing to check.
      break;
    case VFParamKind::OMP_Linear:
    case VFParamKind::OMP_LinearRef:
    case VFParamKind::OMP_LinearVal:
    case VFParamKind::OMP_LinearUVal:
      // Compile time linear steps must be non-zero.
      if (Parameters[Pos].LinearStepOrPos == 0)
        return false;
      break;
    case VFParamKind::OMP_LinearPos:
    case VFParamKind::OMP_LinearRefPos:
    case VFParamKind::OMP_LinearValPos:
    case VFParamKind::OMP_LinearUValPos:
      // The runtime linear step must be referring to some other
      // parameters in the signature.
      if (Parameters[Pos].LinearStepOrPos >= int(NumParams))
        return false;
      // The linear step parameter must be marked as uniform.
      if (Parameters[Parameters[Pos].LinearStepOrPos].ParamKind !=
          VFParamKind::OMP_Uniform)
        return false;
      // The linear step parameter can't point at itself.
      if (Parameters[Pos].LinearStepOrPos == int(Pos))
        return false;
      break;
    case VFParamKind::GlobalPredicate:
      // The global predicate must be unique; it can be placed anywhere in the
      // signature.
      for (unsigned NextPos = Pos + 1; NextPos < NumParams; ++NextPos)
        if (Parameters[NextPos].ParamKind == VFParamKind::GlobalPredicate)
          return false;
      break;
    }
  }
  return true;
}