1 //===- LoadStoreVectorizer.cpp - GPU Load & Store Vectorizer --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass merges loads/stores to/from sequential memory addresses into vector
10 // loads/stores.  Although there's nothing GPU-specific in here, this pass is
// motivated by the microarchitectural quirks of NVIDIA and AMD GPUs.
12 //
13 // (For simplicity below we talk about loads only, but everything also applies
14 // to stores.)
15 //
16 // This pass is intended to be run late in the pipeline, after other
17 // vectorization opportunities have been exploited.  So the assumption here is
18 // that immediately following our new vector load we'll need to extract out the
19 // individual elements of the load, so we can operate on them individually.
20 //
21 // On CPUs this transformation is usually not beneficial, because extracting the
22 // elements of a vector register is expensive on most architectures.  It's
23 // usually better just to load each element individually into its own scalar
24 // register.
25 //
// However, NVIDIA and AMD GPUs don't have proper vector registers.  Instead, a
27 // "vector load" loads directly into a series of scalar registers.  In effect,
28 // extracting the elements of the vector is free.  It's therefore always
29 // beneficial to vectorize a sequence of loads on these architectures.
30 //
31 // Vectorizing (perhaps a better name might be "coalescing") loads can have
32 // large performance impacts on GPU kernels, and opportunities for vectorizing
33 // are common in GPU code.  This pass tries very hard to find such
34 // opportunities; its runtime is quadratic in the number of loads in a BB.
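//
// As an illustrative sketch (the names, types, and chain length here are made
// up; the actual shape depends on the target and the input), the pass turns
//
//   %l0 = load i32, i32* %p
//   %p1 = getelementptr inbounds i32, i32* %p, i64 1
//   %l1 = load i32, i32* %p1
//
// into roughly
//
//   %vp = bitcast i32* %p to <2 x i32>*
//   %v  = load <2 x i32>, <2 x i32>* %vp
//   %l0 = extractelement <2 x i32> %v, i32 0
//   %l1 = extractelement <2 x i32> %v, i32 1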
35 //
36 // Some CPU architectures, such as ARM, have instructions that load into
37 // multiple scalar registers, similar to a GPU vectorized load.  In theory ARM
38 // could use this pass (with some modifications), but currently it implements
39 // its own pass to do something similar to what we do here.
40 
41 #include "llvm/Transforms/Vectorize/LoadStoreVectorizer.h"
42 #include "llvm/ADT/APInt.h"
43 #include "llvm/ADT/ArrayRef.h"
44 #include "llvm/ADT/MapVector.h"
45 #include "llvm/ADT/PostOrderIterator.h"
46 #include "llvm/ADT/STLExtras.h"
47 #include "llvm/ADT/SmallPtrSet.h"
48 #include "llvm/ADT/SmallVector.h"
49 #include "llvm/ADT/Statistic.h"
50 #include "llvm/ADT/iterator_range.h"
51 #include "llvm/Analysis/AliasAnalysis.h"
52 #include "llvm/Analysis/MemoryLocation.h"
53 #include "llvm/Analysis/ScalarEvolution.h"
54 #include "llvm/Analysis/TargetTransformInfo.h"
55 #include "llvm/Analysis/ValueTracking.h"
56 #include "llvm/Analysis/VectorUtils.h"
57 #include "llvm/IR/Attributes.h"
58 #include "llvm/IR/BasicBlock.h"
59 #include "llvm/IR/Constants.h"
60 #include "llvm/IR/DataLayout.h"
61 #include "llvm/IR/DerivedTypes.h"
62 #include "llvm/IR/Dominators.h"
63 #include "llvm/IR/Function.h"
64 #include "llvm/IR/IRBuilder.h"
65 #include "llvm/IR/InstrTypes.h"
66 #include "llvm/IR/Instruction.h"
67 #include "llvm/IR/Instructions.h"
68 #include "llvm/IR/IntrinsicInst.h"
69 #include "llvm/IR/Module.h"
70 #include "llvm/IR/Type.h"
71 #include "llvm/IR/User.h"
72 #include "llvm/IR/Value.h"
73 #include "llvm/InitializePasses.h"
74 #include "llvm/Pass.h"
75 #include "llvm/Support/Casting.h"
76 #include "llvm/Support/Debug.h"
77 #include "llvm/Support/KnownBits.h"
78 #include "llvm/Support/MathExtras.h"
79 #include "llvm/Support/raw_ostream.h"
80 #include "llvm/Transforms/Utils/Local.h"
81 #include "llvm/Transforms/Vectorize.h"
82 #include <algorithm>
83 #include <cassert>
84 #include <cstdlib>
85 #include <tuple>
86 #include <utility>
87 
88 using namespace llvm;
89 
90 #define DEBUG_TYPE "load-store-vectorizer"
91 
92 STATISTIC(NumVectorInstructions, "Number of vector accesses generated");
93 STATISTIC(NumScalarsVectorized, "Number of scalar accesses vectorized");
94 
95 // FIXME: Assuming stack alignment of 4 is always good enough
96 static const unsigned StackAdjustedAlignment = 4;
97 
98 namespace {
99 
/// ChainID is an arbitrary token that is allowed to be different only for the
/// accesses that are guaranteed to be considered non-consecutive by
/// Vectorizer::isConsecutiveAccess. It's used to group instructions together
/// and so reduce the number of instructions the main O(n^2) search operates on
/// at a time, i.e. it exists purely to reduce compile time. The underlying
/// type of ChainID should not be relied upon.
107 using ChainID = const Value *;
108 using InstrList = SmallVector<Instruction *, 8>;
109 using InstrListMap = MapVector<ChainID, InstrList>;
110 
111 class Vectorizer {
112   Function &F;
113   AliasAnalysis &AA;
114   DominatorTree &DT;
115   ScalarEvolution &SE;
116   TargetTransformInfo &TTI;
117   const DataLayout &DL;
118   IRBuilder<> Builder;
119 
120 public:
121   Vectorizer(Function &F, AliasAnalysis &AA, DominatorTree &DT,
122              ScalarEvolution &SE, TargetTransformInfo &TTI)
123       : F(F), AA(AA), DT(DT), SE(SE), TTI(TTI),
124         DL(F.getParent()->getDataLayout()), Builder(SE.getContext()) {}
125 
126   bool run();
127 
128 private:
129   unsigned getPointerAddressSpace(Value *I);
130 
131   Align getAlign(LoadInst *LI) const {
132     return DL.getValueOrABITypeAlignment(LI->getAlign(), LI->getType());
133   }
134 
135   Align getAlign(StoreInst *SI) const {
136     return DL.getValueOrABITypeAlignment(SI->getAlign(),
137                                          SI->getValueOperand()->getType());
138   }
139 
140   static const unsigned MaxDepth = 3;
141 
142   bool isConsecutiveAccess(Value *A, Value *B);
143   bool areConsecutivePointers(Value *PtrA, Value *PtrB, APInt PtrDelta,
144                               unsigned Depth = 0) const;
145   bool lookThroughComplexAddresses(Value *PtrA, Value *PtrB, APInt PtrDelta,
146                                    unsigned Depth) const;
147   bool lookThroughSelects(Value *PtrA, Value *PtrB, const APInt &PtrDelta,
148                           unsigned Depth) const;
149 
150   /// After vectorization, reorder the instructions that I depends on
151   /// (the instructions defining its operands), to ensure they dominate I.
152   void reorder(Instruction *I);
153 
154   /// Returns the first and the last instructions in Chain.
155   std::pair<BasicBlock::iterator, BasicBlock::iterator>
156   getBoundaryInstrs(ArrayRef<Instruction *> Chain);
157 
158   /// Erases the original instructions after vectorizing.
159   void eraseInstructions(ArrayRef<Instruction *> Chain);
160 
161   /// "Legalize" the vector type that would be produced by combining \p
162   /// ElementSizeBits elements in \p Chain. Break into two pieces such that the
163   /// total size of each piece is 1, 2 or a multiple of 4 bytes. \p Chain is
164   /// expected to have more than 4 elements.
165   std::pair<ArrayRef<Instruction *>, ArrayRef<Instruction *>>
166   splitOddVectorElts(ArrayRef<Instruction *> Chain, unsigned ElementSizeBits);
167 
168   /// Finds the largest prefix of Chain that's vectorizable, checking for
169   /// intervening instructions which may affect the memory accessed by the
170   /// instructions within Chain.
171   ///
172   /// The elements of \p Chain must be all loads or all stores and must be in
173   /// address order.
174   ArrayRef<Instruction *> getVectorizablePrefix(ArrayRef<Instruction *> Chain);
175 
176   /// Collects load and store instructions to vectorize.
177   std::pair<InstrListMap, InstrListMap> collectInstructions(BasicBlock *BB);
178 
  /// Processes the collected instructions in \p Map. The values of \p Map
  /// must be all loads or all stores.
181   bool vectorizeChains(InstrListMap &Map);
182 
  /// Finds loads and stores to consecutive memory addresses and vectorizes them.
184   bool vectorizeInstructions(ArrayRef<Instruction *> Instrs);
185 
186   /// Vectorizes the load instructions in Chain.
187   bool
188   vectorizeLoadChain(ArrayRef<Instruction *> Chain,
189                      SmallPtrSet<Instruction *, 16> *InstructionsProcessed);
190 
191   /// Vectorizes the store instructions in Chain.
192   bool
193   vectorizeStoreChain(ArrayRef<Instruction *> Chain,
194                       SmallPtrSet<Instruction *, 16> *InstructionsProcessed);
195 
  /// Checks whether a load/store access of the given size and alignment in
  /// the given address space is misaligned.
197   bool accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
198                           unsigned Alignment);
199 };
200 
201 class LoadStoreVectorizerLegacyPass : public FunctionPass {
202 public:
203   static char ID;
204 
205   LoadStoreVectorizerLegacyPass() : FunctionPass(ID) {
206     initializeLoadStoreVectorizerLegacyPassPass(*PassRegistry::getPassRegistry());
207   }
208 
209   bool runOnFunction(Function &F) override;
210 
211   StringRef getPassName() const override {
212     return "GPU Load and Store Vectorizer";
213   }
214 
215   void getAnalysisUsage(AnalysisUsage &AU) const override {
216     AU.addRequired<AAResultsWrapperPass>();
217     AU.addRequired<ScalarEvolutionWrapperPass>();
218     AU.addRequired<DominatorTreeWrapperPass>();
219     AU.addRequired<TargetTransformInfoWrapperPass>();
220     AU.setPreservesCFG();
221   }
222 };
223 
224 } // end anonymous namespace
225 
226 char LoadStoreVectorizerLegacyPass::ID = 0;
227 
228 INITIALIZE_PASS_BEGIN(LoadStoreVectorizerLegacyPass, DEBUG_TYPE,
                      "Vectorize load and store instructions", false, false)
230 INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
231 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
232 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
233 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
234 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
235 INITIALIZE_PASS_END(LoadStoreVectorizerLegacyPass, DEBUG_TYPE,
236                     "Vectorize load and store instructions", false, false)
237 
238 Pass *llvm::createLoadStoreVectorizerPass() {
239   return new LoadStoreVectorizerLegacyPass();
240 }
241 
242 bool LoadStoreVectorizerLegacyPass::runOnFunction(Function &F) {
243   // Don't vectorize when the attribute NoImplicitFloat is used.
244   if (skipFunction(F) || F.hasFnAttribute(Attribute::NoImplicitFloat))
245     return false;
246 
247   AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
248   DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
249   ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
250   TargetTransformInfo &TTI =
251       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
252 
253   Vectorizer V(F, AA, DT, SE, TTI);
254   return V.run();
255 }
256 
257 PreservedAnalyses LoadStoreVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
258   // Don't vectorize when the attribute NoImplicitFloat is used.
259   if (F.hasFnAttribute(Attribute::NoImplicitFloat))
260     return PreservedAnalyses::all();
261 
262   AliasAnalysis &AA = AM.getResult<AAManager>(F);
263   DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
264   ScalarEvolution &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
265   TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);
266 
267   Vectorizer V(F, AA, DT, SE, TTI);
268   bool Changed = V.run();
269   PreservedAnalyses PA;
270   PA.preserveSet<CFGAnalyses>();
271   return Changed ? PA : PreservedAnalyses::all();
272 }
273 
274 // The real propagateMetadata expects a SmallVector<Value*>, but we deal in
275 // vectors of Instructions.
276 static void propagateMetadata(Instruction *I, ArrayRef<Instruction *> IL) {
277   SmallVector<Value *, 8> VL(IL.begin(), IL.end());
278   propagateMetadata(I, VL);
279 }
280 
281 // Vectorizer Implementation
282 bool Vectorizer::run() {
283   bool Changed = false;
284 
285   // Scan the blocks in the function in post order.
286   for (BasicBlock *BB : post_order(&F)) {
287     InstrListMap LoadRefs, StoreRefs;
288     std::tie(LoadRefs, StoreRefs) = collectInstructions(BB);
289     Changed |= vectorizeChains(LoadRefs);
290     Changed |= vectorizeChains(StoreRefs);
291   }
292 
293   return Changed;
294 }
295 
296 unsigned Vectorizer::getPointerAddressSpace(Value *I) {
297   if (LoadInst *L = dyn_cast<LoadInst>(I))
298     return L->getPointerAddressSpace();
299   if (StoreInst *S = dyn_cast<StoreInst>(I))
300     return S->getPointerAddressSpace();
301   return -1;
302 }
303 
304 // FIXME: Merge with llvm::isConsecutiveAccess
305 bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
306   Value *PtrA = getLoadStorePointerOperand(A);
307   Value *PtrB = getLoadStorePointerOperand(B);
308   unsigned ASA = getPointerAddressSpace(A);
309   unsigned ASB = getPointerAddressSpace(B);
310 
311   // Check that the address spaces match and that the pointers are valid.
312   if (!PtrA || !PtrB || (ASA != ASB))
313     return false;
314 
  // Make sure that A and B are different pointers and that their pointee types
  // have the same size, both in total and per scalar element.
316   Type *PtrATy = PtrA->getType()->getPointerElementType();
317   Type *PtrBTy = PtrB->getType()->getPointerElementType();
318   if (PtrA == PtrB ||
319       PtrATy->isVectorTy() != PtrBTy->isVectorTy() ||
320       DL.getTypeStoreSize(PtrATy) != DL.getTypeStoreSize(PtrBTy) ||
321       DL.getTypeStoreSize(PtrATy->getScalarType()) !=
322           DL.getTypeStoreSize(PtrBTy->getScalarType()))
323     return false;
324 
325   unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
326   APInt Size(PtrBitWidth, DL.getTypeStoreSize(PtrATy));
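  // I.e., B is consecutive to A exactly when PtrB is Size bytes (the store
  // size of A's accessed type) past PtrA.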
327 
328   return areConsecutivePointers(PtrA, PtrB, Size);
329 }
330 
331 bool Vectorizer::areConsecutivePointers(Value *PtrA, Value *PtrB,
332                                         APInt PtrDelta, unsigned Depth) const {
333   unsigned PtrBitWidth = DL.getPointerTypeSizeInBits(PtrA->getType());
334   APInt OffsetA(PtrBitWidth, 0);
335   APInt OffsetB(PtrBitWidth, 0);
336   PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
337   PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
338 
339   unsigned NewPtrBitWidth = DL.getTypeStoreSizeInBits(PtrA->getType());
340 
341   if (NewPtrBitWidth != DL.getTypeStoreSizeInBits(PtrB->getType()))
342     return false;
343 
  // In the case where we have to shrink the pointer,
  // stripAndAccumulateInBoundsConstantOffsets should properly handle a
  // possible overflow, and the value should fit into the smallest data type
  // used in the cast/gep chain.
348   assert(OffsetA.getMinSignedBits() <= NewPtrBitWidth &&
349          OffsetB.getMinSignedBits() <= NewPtrBitWidth);
350 
351   OffsetA = OffsetA.sextOrTrunc(NewPtrBitWidth);
352   OffsetB = OffsetB.sextOrTrunc(NewPtrBitWidth);
353   PtrDelta = PtrDelta.sextOrTrunc(NewPtrBitWidth);
354 
355   APInt OffsetDelta = OffsetB - OffsetA;
356 
357   // Check if they are based on the same pointer. That makes the offsets
358   // sufficient.
359   if (PtrA == PtrB)
360     return OffsetDelta == PtrDelta;
361 
362   // Compute the necessary base pointer delta to have the necessary final delta
363   // equal to the pointer delta requested.
364   APInt BaseDelta = PtrDelta - OffsetDelta;
365 
366   // Compute the distance with SCEV between the base pointers.
367   const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
368   const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
369   const SCEV *C = SE.getConstant(BaseDelta);
370   const SCEV *X = SE.getAddExpr(PtrSCEVA, C);
371   if (X == PtrSCEVB)
372     return true;
373 
  // The above check will not catch cases where one of the pointers is
  // factorized but the other one is not, such as (C + (S * (A + B))) vs
  // (AS + BS). Computing the difference as a SCEV allows the expressions to be
  // re-combined and the difference to be simplified.
378   const SCEV *Dist = SE.getMinusSCEV(PtrSCEVB, PtrSCEVA);
379   if (C == Dist)
380     return true;
381 
382   // Sometimes even this doesn't work, because SCEV can't always see through
383   // patterns that look like (gep (ext (add (shl X, C1), C2))). Try checking
384   // things the hard way.
385   return lookThroughComplexAddresses(PtrA, PtrB, BaseDelta, Depth);
386 }
387 
388 bool Vectorizer::lookThroughComplexAddresses(Value *PtrA, Value *PtrB,
389                                              APInt PtrDelta,
390                                              unsigned Depth) const {
391   auto *GEPA = dyn_cast<GetElementPtrInst>(PtrA);
392   auto *GEPB = dyn_cast<GetElementPtrInst>(PtrB);
393   if (!GEPA || !GEPB)
394     return lookThroughSelects(PtrA, PtrB, PtrDelta, Depth);
395 
396   // Look through GEPs after checking they're the same except for the last
397   // index.
398   if (GEPA->getNumOperands() != GEPB->getNumOperands() ||
399       GEPA->getPointerOperand() != GEPB->getPointerOperand())
400     return false;
401   gep_type_iterator GTIA = gep_type_begin(GEPA);
402   gep_type_iterator GTIB = gep_type_begin(GEPB);
403   for (unsigned I = 0, E = GEPA->getNumIndices() - 1; I < E; ++I) {
404     if (GTIA.getOperand() != GTIB.getOperand())
405       return false;
406     ++GTIA;
407     ++GTIB;
408   }
409 
410   Instruction *OpA = dyn_cast<Instruction>(GTIA.getOperand());
411   Instruction *OpB = dyn_cast<Instruction>(GTIB.getOperand());
412   if (!OpA || !OpB || OpA->getOpcode() != OpB->getOpcode() ||
413       OpA->getType() != OpB->getType())
414     return false;
415 
416   if (PtrDelta.isNegative()) {
417     if (PtrDelta.isMinSignedValue())
418       return false;
419     PtrDelta.negate();
420     std::swap(OpA, OpB);
421   }
422   uint64_t Stride = DL.getTypeAllocSize(GTIA.getIndexedType());
423   if (PtrDelta.urem(Stride) != 0)
424     return false;
425   unsigned IdxBitWidth = OpA->getType()->getScalarSizeInBits();
426   APInt IdxDiff = PtrDelta.udiv(Stride).zextOrSelf(IdxBitWidth);
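  // The GEPs differ only in their last index, so the pointers are PtrDelta
  // bytes apart exactly when OpB's index exceeds OpA's index by IdxDiff. The
  // rest of this function tries to prove that, including that the implied
  // index addition cannot overflow.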
427 
428   // Only look through a ZExt/SExt.
429   if (!isa<SExtInst>(OpA) && !isa<ZExtInst>(OpA))
430     return false;
431 
432   bool Signed = isa<SExtInst>(OpA);
433 
  // At this point the source of the extension could be a function argument,
  // i.e. not an instruction.
435   Value *ValA = OpA->getOperand(0);
436   OpB = dyn_cast<Instruction>(OpB->getOperand(0));
437   if (!OpB || ValA->getType() != OpB->getType())
438     return false;
439 
440   // Now we need to prove that adding IdxDiff to ValA won't overflow.
441   bool Safe = false;
442   // First attempt: if OpB is an add with NSW/NUW, and OpB is IdxDiff added to
443   // ValA, we're okay.
444   if (OpB->getOpcode() == Instruction::Add &&
445       isa<ConstantInt>(OpB->getOperand(1)) &&
446       IdxDiff.sle(cast<ConstantInt>(OpB->getOperand(1))->getSExtValue())) {
447     if (Signed)
448       Safe = cast<BinaryOperator>(OpB)->hasNoSignedWrap();
449     else
450       Safe = cast<BinaryOperator>(OpB)->hasNoUnsignedWrap();
451   }
452 
453   unsigned BitWidth = ValA->getType()->getScalarSizeInBits();
454 
  // Second attempt:
  // If all set bits of IdxDiff, and all higher-order bits other than the sign
  // bit, are known to be zero in ValA, we can add IdxDiff to it while
  // guaranteeing no overflow of any sort.
459   if (!Safe) {
460     OpA = dyn_cast<Instruction>(ValA);
461     if (!OpA)
462       return false;
463     KnownBits Known(BitWidth);
464     computeKnownBits(OpA, Known, DL, 0, nullptr, OpA, &DT);
465     APInt BitsAllowedToBeSet = Known.Zero.zext(IdxDiff.getBitWidth());
466     if (Signed)
467       BitsAllowedToBeSet.clearBit(BitWidth - 1);
468     if (BitsAllowedToBeSet.ult(IdxDiff))
469       return false;
470   }
471 
472   const SCEV *OffsetSCEVA = SE.getSCEV(ValA);
473   const SCEV *OffsetSCEVB = SE.getSCEV(OpB);
474   const SCEV *C = SE.getConstant(IdxDiff.trunc(BitWidth));
475   const SCEV *X = SE.getAddExpr(OffsetSCEVA, C);
476   return X == OffsetSCEVB;
477 }
478 
479 bool Vectorizer::lookThroughSelects(Value *PtrA, Value *PtrB,
480                                     const APInt &PtrDelta,
481                                     unsigned Depth) const {
482   if (Depth++ == MaxDepth)
483     return false;
484 
485   if (auto *SelectA = dyn_cast<SelectInst>(PtrA)) {
486     if (auto *SelectB = dyn_cast<SelectInst>(PtrB)) {
487       return SelectA->getCondition() == SelectB->getCondition() &&
488              areConsecutivePointers(SelectA->getTrueValue(),
489                                     SelectB->getTrueValue(), PtrDelta, Depth) &&
490              areConsecutivePointers(SelectA->getFalseValue(),
491                                     SelectB->getFalseValue(), PtrDelta, Depth);
492     }
493   }
494   return false;
495 }
496 
497 void Vectorizer::reorder(Instruction *I) {
498   SmallPtrSet<Instruction *, 16> InstructionsToMove;
499   SmallVector<Instruction *, 16> Worklist;
500 
501   Worklist.push_back(I);
502   while (!Worklist.empty()) {
503     Instruction *IW = Worklist.pop_back_val();
504     int NumOperands = IW->getNumOperands();
505     for (int i = 0; i < NumOperands; i++) {
506       Instruction *IM = dyn_cast<Instruction>(IW->getOperand(i));
507       if (!IM || IM->getOpcode() == Instruction::PHI)
508         continue;
509 
510       // If IM is in another BB, no need to move it, because this pass only
511       // vectorizes instructions within one BB.
512       if (IM->getParent() != I->getParent())
513         continue;
514 
515       if (!IM->comesBefore(I)) {
516         InstructionsToMove.insert(IM);
517         Worklist.push_back(IM);
518       }
519     }
520   }
521 
522   // All instructions to move should follow I. Start from I, not from begin().
523   for (auto BBI = I->getIterator(), E = I->getParent()->end(); BBI != E;
524        ++BBI) {
525     if (!InstructionsToMove.count(&*BBI))
526       continue;
527     Instruction *IM = &*BBI;
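    // Step the iterator back before unlinking IM, so that the ++BBI in the
    // loop header lands on the instruction that originally followed IM.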
528     --BBI;
529     IM->removeFromParent();
530     IM->insertBefore(I);
531   }
532 }
533 
534 std::pair<BasicBlock::iterator, BasicBlock::iterator>
535 Vectorizer::getBoundaryInstrs(ArrayRef<Instruction *> Chain) {
536   Instruction *C0 = Chain[0];
537   BasicBlock::iterator FirstInstr = C0->getIterator();
538   BasicBlock::iterator LastInstr = C0->getIterator();
539 
540   BasicBlock *BB = C0->getParent();
541   unsigned NumFound = 0;
542   for (Instruction &I : *BB) {
543     if (!is_contained(Chain, &I))
544       continue;
545 
546     ++NumFound;
547     if (NumFound == 1) {
548       FirstInstr = I.getIterator();
549     }
550     if (NumFound == Chain.size()) {
551       LastInstr = I.getIterator();
552       break;
553     }
554   }
555 
556   // Range is [first, last).
557   return std::make_pair(FirstInstr, ++LastInstr);
558 }
559 
560 void Vectorizer::eraseInstructions(ArrayRef<Instruction *> Chain) {
561   SmallVector<Instruction *, 16> Instrs;
562   for (Instruction *I : Chain) {
563     Value *PtrOperand = getLoadStorePointerOperand(I);
564     assert(PtrOperand && "Instruction must have a pointer operand.");
565     Instrs.push_back(I);
566     if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(PtrOperand))
567       Instrs.push_back(GEP);
568   }
569 
570   // Erase instructions.
571   for (Instruction *I : Instrs)
572     if (I->use_empty())
573       I->eraseFromParent();
574 }
575 
576 std::pair<ArrayRef<Instruction *>, ArrayRef<Instruction *>>
577 Vectorizer::splitOddVectorElts(ArrayRef<Instruction *> Chain,
578                                unsigned ElementSizeBits) {
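  // NumLeft is how many whole elements fit once the chain's total size is
  // rounded down to a multiple of 4 bytes. If that already covers the whole
  // chain, split it evenly in half when the element count is even, or peel off
  // just the last element when it is odd; if it covers nothing, keep a single
  // element so the caller still makes progress.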
579   unsigned ElementSizeBytes = ElementSizeBits / 8;
580   unsigned SizeBytes = ElementSizeBytes * Chain.size();
581   unsigned NumLeft = (SizeBytes - (SizeBytes % 4)) / ElementSizeBytes;
582   if (NumLeft == Chain.size()) {
583     if ((NumLeft & 1) == 0)
584       NumLeft /= 2; // Split even in half
585     else
586       --NumLeft;    // Split off last element
587   } else if (NumLeft == 0)
588     NumLeft = 1;
589   return std::make_pair(Chain.slice(0, NumLeft), Chain.slice(NumLeft));
590 }
591 
592 ArrayRef<Instruction *>
593 Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
594   // These are in BB order, unlike Chain, which is in address order.
595   SmallVector<Instruction *, 16> MemoryInstrs;
596   SmallVector<Instruction *, 16> ChainInstrs;
597 
598   bool IsLoadChain = isa<LoadInst>(Chain[0]);
599   LLVM_DEBUG({
600     for (Instruction *I : Chain) {
601       if (IsLoadChain)
602         assert(isa<LoadInst>(I) &&
603                "All elements of Chain must be loads, or all must be stores.");
604       else
605         assert(isa<StoreInst>(I) &&
606                "All elements of Chain must be loads, or all must be stores.");
607     }
608   });
609 
610   for (Instruction &I : make_range(getBoundaryInstrs(Chain))) {
611     if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
612       if (!is_contained(Chain, &I))
613         MemoryInstrs.push_back(&I);
614       else
615         ChainInstrs.push_back(&I);
616     } else if (isa<IntrinsicInst>(&I) &&
617                cast<IntrinsicInst>(&I)->getIntrinsicID() ==
618                    Intrinsic::sideeffect) {
619       // Ignore llvm.sideeffect calls.
620     } else if (IsLoadChain && (I.mayWriteToMemory() || I.mayThrow())) {
621       LLVM_DEBUG(dbgs() << "LSV: Found may-write/throw operation: " << I
622                         << '\n');
623       break;
624     } else if (!IsLoadChain && (I.mayReadOrWriteMemory() || I.mayThrow())) {
625       LLVM_DEBUG(dbgs() << "LSV: Found may-read/write/throw operation: " << I
626                         << '\n');
627       break;
628     }
629   }
630 
631   // Loop until we find an instruction in ChainInstrs that we can't vectorize.
632   unsigned ChainInstrIdx = 0;
633   Instruction *BarrierMemoryInstr = nullptr;
634 
635   for (unsigned E = ChainInstrs.size(); ChainInstrIdx < E; ++ChainInstrIdx) {
636     Instruction *ChainInstr = ChainInstrs[ChainInstrIdx];
637 
638     // If a barrier memory instruction was found, chain instructions that follow
639     // will not be added to the valid prefix.
640     if (BarrierMemoryInstr && BarrierMemoryInstr->comesBefore(ChainInstr))
641       break;
642 
643     // Check (in BB order) if any instruction prevents ChainInstr from being
644     // vectorized. Find and store the first such "conflicting" instruction.
645     for (Instruction *MemInstr : MemoryInstrs) {
646       // If a barrier memory instruction was found, do not check past it.
647       if (BarrierMemoryInstr && BarrierMemoryInstr->comesBefore(MemInstr))
648         break;
649 
650       auto *MemLoad = dyn_cast<LoadInst>(MemInstr);
651       auto *ChainLoad = dyn_cast<LoadInst>(ChainInstr);
652       if (MemLoad && ChainLoad)
653         continue;
654 
      // We can ignore the alias if we have a load/store pair and the load is
      // known to be invariant. The load cannot be clobbered by the store.
657       auto IsInvariantLoad = [](const LoadInst *LI) -> bool {
658         return LI->hasMetadata(LLVMContext::MD_invariant_load);
659       };
660 
661       // We can ignore the alias as long as the load comes before the store,
662       // because that means we won't be moving the load past the store to
663       // vectorize it (the vectorized load is inserted at the location of the
664       // first load in the chain).
665       if (isa<StoreInst>(MemInstr) && ChainLoad &&
666           (IsInvariantLoad(ChainLoad) || ChainLoad->comesBefore(MemInstr)))
667         continue;
668 
669       // Same case, but in reverse.
670       if (MemLoad && isa<StoreInst>(ChainInstr) &&
671           (IsInvariantLoad(MemLoad) || MemLoad->comesBefore(ChainInstr)))
672         continue;
673 
674       if (!AA.isNoAlias(MemoryLocation::get(MemInstr),
675                         MemoryLocation::get(ChainInstr))) {
676         LLVM_DEBUG({
677           dbgs() << "LSV: Found alias:\n"
678                     "  Aliasing instruction and pointer:\n"
679                  << "  " << *MemInstr << '\n'
680                  << "  " << *getLoadStorePointerOperand(MemInstr) << '\n'
681                  << "  Aliased instruction and pointer:\n"
682                  << "  " << *ChainInstr << '\n'
683                  << "  " << *getLoadStorePointerOperand(ChainInstr) << '\n';
684         });
685         // Save this aliasing memory instruction as a barrier, but allow other
686         // instructions that precede the barrier to be vectorized with this one.
687         BarrierMemoryInstr = MemInstr;
688         break;
689       }
690     }
691     // Continue the search only for store chains, since vectorizing stores that
692     // precede an aliasing load is valid. Conversely, vectorizing loads is valid
693     // up to an aliasing store, but should not pull loads from further down in
694     // the basic block.
695     if (IsLoadChain && BarrierMemoryInstr) {
696       // The BarrierMemoryInstr is a store that precedes ChainInstr.
697       assert(BarrierMemoryInstr->comesBefore(ChainInstr));
698       break;
699     }
700   }
701 
702   // Find the largest prefix of Chain whose elements are all in
703   // ChainInstrs[0, ChainInstrIdx).  This is the largest vectorizable prefix of
704   // Chain.  (Recall that Chain is in address order, but ChainInstrs is in BB
705   // order.)
706   SmallPtrSet<Instruction *, 8> VectorizableChainInstrs(
707       ChainInstrs.begin(), ChainInstrs.begin() + ChainInstrIdx);
708   unsigned ChainIdx = 0;
709   for (unsigned ChainLen = Chain.size(); ChainIdx < ChainLen; ++ChainIdx) {
710     if (!VectorizableChainInstrs.count(Chain[ChainIdx]))
711       break;
712   }
713   return Chain.slice(0, ChainIdx);
714 }
715 
716 static ChainID getChainID(const Value *Ptr, const DataLayout &DL) {
717   const Value *ObjPtr = GetUnderlyingObject(Ptr, DL);
718   if (const auto *Sel = dyn_cast<SelectInst>(ObjPtr)) {
    // The selects themselves are distinct instructions even if they share the
    // same condition and evaluate to consecutive pointers for the true and
    // false values of the condition. Therefore using the selects themselves
    // for grouping instructions would put consecutive accesses into different
    // lists; they would never even be checked for being consecutive, and would
    // not be vectorized.
725     return Sel->getCondition();
726   }
727   return ObjPtr;
728 }
729 
730 std::pair<InstrListMap, InstrListMap>
731 Vectorizer::collectInstructions(BasicBlock *BB) {
732   InstrListMap LoadRefs;
733   InstrListMap StoreRefs;
734 
735   for (Instruction &I : *BB) {
736     if (!I.mayReadOrWriteMemory())
737       continue;
738 
739     if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
740       if (!LI->isSimple())
741         continue;
742 
743       // Skip if it's not legal.
744       if (!TTI.isLegalToVectorizeLoad(LI))
745         continue;
746 
747       Type *Ty = LI->getType();
748       if (!VectorType::isValidElementType(Ty->getScalarType()))
749         continue;
750 
751       // Skip weird non-byte sizes. They probably aren't worth the effort of
752       // handling correctly.
753       unsigned TySize = DL.getTypeSizeInBits(Ty);
754       if ((TySize % 8) != 0)
755         continue;
756 
      // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain
      // functions currently use an integer type for the vectorized load/store,
      // and do not support casting between that integer type and a vector of
      // pointers (e.g. i64 to <2 x i16*>).
761       if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy())
762         continue;
763 
764       Value *Ptr = LI->getPointerOperand();
765       unsigned AS = Ptr->getType()->getPointerAddressSpace();
766       unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
767 
768       unsigned VF = VecRegSize / TySize;
769       VectorType *VecTy = dyn_cast<VectorType>(Ty);
770 
771       // No point in looking at these if they're too big to vectorize.
772       if (TySize > VecRegSize / 2 ||
773           (VecTy && TTI.getLoadVectorFactor(VF, TySize, TySize / 8, VecTy) == 0))
774         continue;
775 
776       // Make sure all the users of a vector are constant-index extracts.
777       if (isa<VectorType>(Ty) && !llvm::all_of(LI->users(), [](const User *U) {
778             const ExtractElementInst *EEI = dyn_cast<ExtractElementInst>(U);
779             return EEI && isa<ConstantInt>(EEI->getOperand(1));
780           }))
781         continue;
782 
783       // Save the load locations.
784       const ChainID ID = getChainID(Ptr, DL);
785       LoadRefs[ID].push_back(LI);
786     } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {
787       if (!SI->isSimple())
788         continue;
789 
790       // Skip if it's not legal.
791       if (!TTI.isLegalToVectorizeStore(SI))
792         continue;
793 
794       Type *Ty = SI->getValueOperand()->getType();
795       if (!VectorType::isValidElementType(Ty->getScalarType()))
796         continue;
797 
      // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain
      // functions currently use an integer type for the vectorized load/store,
      // and do not support casting between that integer type and a vector of
      // pointers (e.g. i64 to <2 x i16*>).
802       if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy())
803         continue;
804 
805       // Skip weird non-byte sizes. They probably aren't worth the effort of
806       // handling correctly.
807       unsigned TySize = DL.getTypeSizeInBits(Ty);
808       if ((TySize % 8) != 0)
809         continue;
810 
811       Value *Ptr = SI->getPointerOperand();
812       unsigned AS = Ptr->getType()->getPointerAddressSpace();
813       unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
814 
815       unsigned VF = VecRegSize / TySize;
816       VectorType *VecTy = dyn_cast<VectorType>(Ty);
817 
818       // No point in looking at these if they're too big to vectorize.
819       if (TySize > VecRegSize / 2 ||
820           (VecTy && TTI.getStoreVectorFactor(VF, TySize, TySize / 8, VecTy) == 0))
821         continue;
822 
823       if (isa<VectorType>(Ty) && !llvm::all_of(SI->users(), [](const User *U) {
824             const ExtractElementInst *EEI = dyn_cast<ExtractElementInst>(U);
825             return EEI && isa<ConstantInt>(EEI->getOperand(1));
826           }))
827         continue;
828 
829       // Save store location.
830       const ChainID ID = getChainID(Ptr, DL);
831       StoreRefs[ID].push_back(SI);
832     }
833   }
834 
835   return {LoadRefs, StoreRefs};
836 }
837 
838 bool Vectorizer::vectorizeChains(InstrListMap &Map) {
839   bool Changed = false;
840 
841   for (const std::pair<ChainID, InstrList> &Chain : Map) {
842     unsigned Size = Chain.second.size();
843     if (Size < 2)
844       continue;
845 
846     LLVM_DEBUG(dbgs() << "LSV: Analyzing a chain of length " << Size << ".\n");
847 
    // Process the instructions in chunks of 64.
849     for (unsigned CI = 0, CE = Size; CI < CE; CI += 64) {
850       unsigned Len = std::min<unsigned>(CE - CI, 64);
851       ArrayRef<Instruction *> Chunk(&Chain.second[CI], Len);
852       Changed |= vectorizeInstructions(Chunk);
853     }
854   }
855 
856   return Changed;
857 }
858 
859 bool Vectorizer::vectorizeInstructions(ArrayRef<Instruction *> Instrs) {
860   LLVM_DEBUG(dbgs() << "LSV: Vectorizing " << Instrs.size()
861                     << " instructions.\n");
862   SmallVector<int, 16> Heads, Tails;
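  // vectorizeChains() hands us at most 64 instructions at a time (it walks
  // each chain in chunks of 64), so a fixed-size array is sufficient here.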
863   int ConsecutiveChain[64];
864 
865   // Do a quadratic search on all of the given loads/stores and find all of the
866   // pairs of loads/stores that follow each other.
867   for (int i = 0, e = Instrs.size(); i < e; ++i) {
868     ConsecutiveChain[i] = -1;
869     for (int j = e - 1; j >= 0; --j) {
870       if (i == j)
871         continue;
872 
873       if (isConsecutiveAccess(Instrs[i], Instrs[j])) {
874         if (ConsecutiveChain[i] != -1) {
875           int CurDistance = std::abs(ConsecutiveChain[i] - i);
876           int NewDistance = std::abs(ConsecutiveChain[i] - j);
877           if (j < i || NewDistance > CurDistance)
878             continue; // Should not insert.
879         }
880 
881         Tails.push_back(j);
882         Heads.push_back(i);
883         ConsecutiveChain[i] = j;
884       }
885     }
886   }
887 
888   bool Changed = false;
889   SmallPtrSet<Instruction *, 16> InstructionsProcessed;
890 
891   for (int Head : Heads) {
892     if (InstructionsProcessed.count(Instrs[Head]))
893       continue;
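    // Skip heads that are themselves the tail of another, still-unprocessed
    // pair; starting there would cut short a longer chain that begins earlier.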
894     bool LongerChainExists = false;
895     for (unsigned TIt = 0; TIt < Tails.size(); TIt++)
896       if (Head == Tails[TIt] &&
897           !InstructionsProcessed.count(Instrs[Heads[TIt]])) {
898         LongerChainExists = true;
899         break;
900       }
901     if (LongerChainExists)
902       continue;
903 
904     // We found an instr that starts a chain. Now follow the chain and try to
905     // vectorize it.
906     SmallVector<Instruction *, 16> Operands;
907     int I = Head;
908     while (I != -1 && (is_contained(Tails, I) || is_contained(Heads, I))) {
909       if (InstructionsProcessed.count(Instrs[I]))
910         break;
911 
912       Operands.push_back(Instrs[I]);
913       I = ConsecutiveChain[I];
914     }
915 
916     bool Vectorized = false;
917     if (isa<LoadInst>(*Operands.begin()))
918       Vectorized = vectorizeLoadChain(Operands, &InstructionsProcessed);
919     else
920       Vectorized = vectorizeStoreChain(Operands, &InstructionsProcessed);
921 
922     Changed |= Vectorized;
923   }
924 
925   return Changed;
926 }
927 
928 bool Vectorizer::vectorizeStoreChain(
929     ArrayRef<Instruction *> Chain,
930     SmallPtrSet<Instruction *, 16> *InstructionsProcessed) {
931   StoreInst *S0 = cast<StoreInst>(Chain[0]);
932 
933   // If the vector has an int element, default to int for the whole store.
934   Type *StoreTy = nullptr;
935   for (Instruction *I : Chain) {
936     StoreTy = cast<StoreInst>(I)->getValueOperand()->getType();
937     if (StoreTy->isIntOrIntVectorTy())
938       break;
939 
940     if (StoreTy->isPtrOrPtrVectorTy()) {
941       StoreTy = Type::getIntNTy(F.getParent()->getContext(),
942                                 DL.getTypeSizeInBits(StoreTy));
943       break;
944     }
945   }
946   assert(StoreTy && "Failed to find store type");
947 
948   unsigned Sz = DL.getTypeSizeInBits(StoreTy);
949   unsigned AS = S0->getPointerAddressSpace();
950   unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
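  // VF is the maximum number of elements of this width that fit in one
  // target vector register.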
951   unsigned VF = VecRegSize / Sz;
952   unsigned ChainSize = Chain.size();
953   Align Alignment = getAlign(S0);
954 
955   if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
956     InstructionsProcessed->insert(Chain.begin(), Chain.end());
957     return false;
958   }
959 
960   ArrayRef<Instruction *> NewChain = getVectorizablePrefix(Chain);
961   if (NewChain.empty()) {
962     // No vectorization possible.
963     InstructionsProcessed->insert(Chain.begin(), Chain.end());
964     return false;
965   }
966   if (NewChain.size() == 1) {
967     // Failed after the first instruction. Discard it and try the smaller chain.
968     InstructionsProcessed->insert(NewChain.front());
969     return false;
970   }
971 
972   // Update Chain to the valid vectorizable subchain.
973   Chain = NewChain;
974   ChainSize = Chain.size();
975 
976   // Check if it's legal to vectorize this chain. If not, split the chain and
977   // try again.
978   unsigned EltSzInBytes = Sz / 8;
979   unsigned SzInBytes = EltSzInBytes * ChainSize;
980 
981   VectorType *VecTy;
982   VectorType *VecStoreTy = dyn_cast<VectorType>(StoreTy);
983   if (VecStoreTy)
984     VecTy = VectorType::get(StoreTy->getScalarType(),
985                             Chain.size() * VecStoreTy->getNumElements());
986   else
987     VecTy = VectorType::get(StoreTy, Chain.size());
988 
989   // If it's more than the max vector size or the target has a better
990   // vector factor, break it into two pieces.
991   unsigned TargetVF = TTI.getStoreVectorFactor(VF, Sz, SzInBytes, VecTy);
992   if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) {
993     LLVM_DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor."
994                          " Creating two separate arrays.\n");
995     return vectorizeStoreChain(Chain.slice(0, TargetVF),
996                                InstructionsProcessed) |
997            vectorizeStoreChain(Chain.slice(TargetVF), InstructionsProcessed);
998   }
999 
1000   LLVM_DEBUG({
1001     dbgs() << "LSV: Stores to vectorize:\n";
1002     for (Instruction *I : Chain)
1003       dbgs() << "  " << *I << "\n";
1004   });
1005 
1006   // We won't try again to vectorize the elements of the chain, regardless of
1007   // whether we succeed below.
1008   InstructionsProcessed->insert(Chain.begin(), Chain.end());
1009 
  // If the store is going to be misaligned, split the chain, unless it is in
  // the alloca address space, in which case try to raise the known alignment
  // of the stack object (and give up if that is not possible).
1011   if (accessIsMisaligned(SzInBytes, AS, Alignment.value())) {
1012     if (S0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
1013       auto Chains = splitOddVectorElts(Chain, Sz);
1014       return vectorizeStoreChain(Chains.first, InstructionsProcessed) |
1015              vectorizeStoreChain(Chains.second, InstructionsProcessed);
1016     }
1017 
1018     Align NewAlign = getOrEnforceKnownAlignment(S0->getPointerOperand(),
1019                                                 Align(StackAdjustedAlignment),
1020                                                 DL, S0, nullptr, &DT);
1021     if (NewAlign >= Alignment)
1022       Alignment = NewAlign;
1023     else
1024       return false;
1025   }
1026 
1027   if (!TTI.isLegalToVectorizeStoreChain(SzInBytes, Alignment.value(), AS)) {
1028     auto Chains = splitOddVectorElts(Chain, Sz);
1029     return vectorizeStoreChain(Chains.first, InstructionsProcessed) |
1030            vectorizeStoreChain(Chains.second, InstructionsProcessed);
1031   }
1032 
1033   BasicBlock::iterator First, Last;
1034   std::tie(First, Last) = getBoundaryInstrs(Chain);
1035   Builder.SetInsertPoint(&*Last);
1036 
1037   Value *Vec = UndefValue::get(VecTy);
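  // Build the wide value to store by inserting each chain element (or, for
  // vector-typed stores, each lane of each element) into Vec at its final
  // position.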
1038 
1039   if (VecStoreTy) {
1040     unsigned VecWidth = VecStoreTy->getNumElements();
1041     for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
1042       StoreInst *Store = cast<StoreInst>(Chain[I]);
1043       for (unsigned J = 0, NE = VecStoreTy->getNumElements(); J != NE; ++J) {
1044         unsigned NewIdx = J + I * VecWidth;
1045         Value *Extract = Builder.CreateExtractElement(Store->getValueOperand(),
1046                                                       Builder.getInt32(J));
1047         if (Extract->getType() != StoreTy->getScalarType())
1048           Extract = Builder.CreateBitCast(Extract, StoreTy->getScalarType());
1049 
1050         Value *Insert =
1051             Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(NewIdx));
1052         Vec = Insert;
1053       }
1054     }
1055   } else {
1056     for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
1057       StoreInst *Store = cast<StoreInst>(Chain[I]);
1058       Value *Extract = Store->getValueOperand();
1059       if (Extract->getType() != StoreTy->getScalarType())
1060         Extract =
1061             Builder.CreateBitOrPointerCast(Extract, StoreTy->getScalarType());
1062 
1063       Value *Insert =
1064           Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(I));
1065       Vec = Insert;
1066     }
1067   }
1068 
1069   StoreInst *SI = Builder.CreateAlignedStore(
1070     Vec,
1071     Builder.CreateBitCast(S0->getPointerOperand(), VecTy->getPointerTo(AS)),
1072     Alignment);
1073   propagateMetadata(SI, Chain);
1074 
1075   eraseInstructions(Chain);
1076   ++NumVectorInstructions;
1077   NumScalarsVectorized += Chain.size();
1078   return true;
1079 }
1080 
1081 bool Vectorizer::vectorizeLoadChain(
1082     ArrayRef<Instruction *> Chain,
1083     SmallPtrSet<Instruction *, 16> *InstructionsProcessed) {
1084   LoadInst *L0 = cast<LoadInst>(Chain[0]);
1085 
1086   // If the vector has an int element, default to int for the whole load.
1087   Type *LoadTy = nullptr;
1088   for (const auto &V : Chain) {
1089     LoadTy = cast<LoadInst>(V)->getType();
1090     if (LoadTy->isIntOrIntVectorTy())
1091       break;
1092 
1093     if (LoadTy->isPtrOrPtrVectorTy()) {
1094       LoadTy = Type::getIntNTy(F.getParent()->getContext(),
1095                                DL.getTypeSizeInBits(LoadTy));
1096       break;
1097     }
1098   }
1099   assert(LoadTy && "Can't determine LoadInst type from chain");
1100 
1101   unsigned Sz = DL.getTypeSizeInBits(LoadTy);
1102   unsigned AS = L0->getPointerAddressSpace();
1103   unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
1104   unsigned VF = VecRegSize / Sz;
1105   unsigned ChainSize = Chain.size();
1106   Align Alignment = getAlign(L0);
1107 
1108   if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
1109     InstructionsProcessed->insert(Chain.begin(), Chain.end());
1110     return false;
1111   }
1112 
1113   ArrayRef<Instruction *> NewChain = getVectorizablePrefix(Chain);
1114   if (NewChain.empty()) {
1115     // No vectorization possible.
1116     InstructionsProcessed->insert(Chain.begin(), Chain.end());
1117     return false;
1118   }
1119   if (NewChain.size() == 1) {
1120     // Failed after the first instruction. Discard it and try the smaller chain.
1121     InstructionsProcessed->insert(NewChain.front());
1122     return false;
1123   }
1124 
1125   // Update Chain to the valid vectorizable subchain.
1126   Chain = NewChain;
1127   ChainSize = Chain.size();
1128 
1129   // Check if it's legal to vectorize this chain. If not, split the chain and
1130   // try again.
1131   unsigned EltSzInBytes = Sz / 8;
1132   unsigned SzInBytes = EltSzInBytes * ChainSize;
1133   VectorType *VecTy;
1134   VectorType *VecLoadTy = dyn_cast<VectorType>(LoadTy);
1135   if (VecLoadTy)
1136     VecTy = VectorType::get(LoadTy->getScalarType(),
1137                             Chain.size() * VecLoadTy->getNumElements());
1138   else
1139     VecTy = VectorType::get(LoadTy, Chain.size());
1140 
1141   // If it's more than the max vector size or the target has a better
1142   // vector factor, break it into two pieces.
1143   unsigned TargetVF = TTI.getLoadVectorFactor(VF, Sz, SzInBytes, VecTy);
1144   if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) {
1145     LLVM_DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor."
1146                          " Creating two separate arrays.\n");
1147     return vectorizeLoadChain(Chain.slice(0, TargetVF), InstructionsProcessed) |
1148            vectorizeLoadChain(Chain.slice(TargetVF), InstructionsProcessed);
1149   }
1150 
1151   // We won't try again to vectorize the elements of the chain, regardless of
1152   // whether we succeed below.
1153   InstructionsProcessed->insert(Chain.begin(), Chain.end());
1154 
  // If the load is going to be misaligned, split the chain, unless it is in
  // the alloca address space, in which case try to raise the known alignment
  // of the stack object (and give up if that is not possible).
1156   if (accessIsMisaligned(SzInBytes, AS, Alignment.value())) {
1157     if (L0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
1158       auto Chains = splitOddVectorElts(Chain, Sz);
1159       return vectorizeLoadChain(Chains.first, InstructionsProcessed) |
1160              vectorizeLoadChain(Chains.second, InstructionsProcessed);
1161     }
1162 
1163     Align NewAlign = getOrEnforceKnownAlignment(L0->getPointerOperand(),
1164                                                 Align(StackAdjustedAlignment),
1165                                                 DL, L0, nullptr, &DT);
1166     if (NewAlign >= Alignment)
1167       Alignment = NewAlign;
1168     else
1169       return false;
1170   }
1171 
1172   if (!TTI.isLegalToVectorizeLoadChain(SzInBytes, Alignment.value(), AS)) {
1173     auto Chains = splitOddVectorElts(Chain, Sz);
1174     return vectorizeLoadChain(Chains.first, InstructionsProcessed) |
1175            vectorizeLoadChain(Chains.second, InstructionsProcessed);
1176   }
1177 
1178   LLVM_DEBUG({
1179     dbgs() << "LSV: Loads to vectorize:\n";
1180     for (Instruction *I : Chain)
1181       I->dump();
1182   });
1183 
1184   // getVectorizablePrefix already computed getBoundaryInstrs.  The value of
1185   // Last may have changed since then, but the value of First won't have.  If it
1186   // matters, we could compute getBoundaryInstrs only once and reuse it here.
1187   BasicBlock::iterator First, Last;
1188   std::tie(First, Last) = getBoundaryInstrs(Chain);
1189   Builder.SetInsertPoint(&*First);
1190 
1191   Value *Bitcast =
1192       Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS));
1193   LoadInst *LI =
1194       Builder.CreateAlignedLoad(VecTy, Bitcast, MaybeAlign(Alignment));
1195   propagateMetadata(LI, Chain);
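  // Rewire the uses: each original load (or, for vector-typed loads, each of
  // its extractelement users) is replaced with an extract of the corresponding
  // lane(s) of LI, bitcast back to the original type where needed.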
1196 
1197   if (VecLoadTy) {
1198     SmallVector<Instruction *, 16> InstrsToErase;
1199 
1200     unsigned VecWidth = VecLoadTy->getNumElements();
1201     for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
1202       for (auto Use : Chain[I]->users()) {
1203         // All users of vector loads are ExtractElement instructions with
1204         // constant indices, otherwise we would have bailed before now.
1205         Instruction *UI = cast<Instruction>(Use);
1206         unsigned Idx = cast<ConstantInt>(UI->getOperand(1))->getZExtValue();
1207         unsigned NewIdx = Idx + I * VecWidth;
1208         Value *V = Builder.CreateExtractElement(LI, Builder.getInt32(NewIdx),
1209                                                 UI->getName());
1210         if (V->getType() != UI->getType())
1211           V = Builder.CreateBitCast(V, UI->getType());
1212 
1213         // Replace the old instruction.
1214         UI->replaceAllUsesWith(V);
1215         InstrsToErase.push_back(UI);
1216       }
1217     }
1218 
    // Bitcast might not be an Instruction if the pointer operand is a
    // constant; in that case there is nothing to reorder.
1221     if (Instruction *BitcastInst = dyn_cast<Instruction>(Bitcast))
1222       reorder(BitcastInst);
1223 
1224     for (auto I : InstrsToErase)
1225       I->eraseFromParent();
1226   } else {
1227     for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
1228       Value *CV = Chain[I];
1229       Value *V =
1230           Builder.CreateExtractElement(LI, Builder.getInt32(I), CV->getName());
1231       if (V->getType() != CV->getType()) {
1232         V = Builder.CreateBitOrPointerCast(V, CV->getType());
1233       }
1234 
1235       // Replace the old instruction.
1236       CV->replaceAllUsesWith(V);
1237     }
1238 
1239     if (Instruction *BitcastInst = dyn_cast<Instruction>(Bitcast))
1240       reorder(BitcastInst);
1241   }
1242 
1243   eraseInstructions(Chain);
1244 
1245   ++NumVectorInstructions;
1246   NumScalarsVectorized += Chain.size();
1247   return true;
1248 }
1249 
1250 bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
1251                                     unsigned Alignment) {
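  // An access whose alignment is a multiple of its size is never considered
  // misaligned; anything else is deferred to the target.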
1252   if (Alignment % SzInBytes == 0)
1253     return false;
1254 
1255   bool Fast = false;
1256   bool Allows = TTI.allowsMisalignedMemoryAccesses(F.getParent()->getContext(),
1257                                                    SzInBytes * 8, AddressSpace,
1258                                                    Alignment, &Fast);
1259   LLVM_DEBUG(dbgs() << "LSV: Target said misaligned is allowed? " << Allows
1260                     << " and fast? " << Fast << "\n";);
1261   return !Allows || !Fast;
1262 }
1263