1 //===- LoadStoreVectorizer.cpp - GPU Load & Store Vectorizer --------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass merges loads/stores to/from sequential memory addresses into vector
11 // loads/stores.  Although there's nothing GPU-specific in here, this pass is
// motivated by the microarchitectural quirks of NVIDIA and AMD GPUs.
13 //
14 // (For simplicity below we talk about loads only, but everything also applies
15 // to stores.)
16 //
17 // This pass is intended to be run late in the pipeline, after other
18 // vectorization opportunities have been exploited.  So the assumption here is
19 // that immediately following our new vector load we'll need to extract out the
20 // individual elements of the load, so we can operate on them individually.
21 //
22 // On CPUs this transformation is usually not beneficial, because extracting the
23 // elements of a vector register is expensive on most architectures.  It's
24 // usually better just to load each element individually into its own scalar
25 // register.
26 //
// However, NVIDIA and AMD GPUs don't have proper vector registers.  Instead, a
28 // "vector load" loads directly into a series of scalar registers.  In effect,
29 // extracting the elements of the vector is free.  It's therefore always
30 // beneficial to vectorize a sequence of loads on these architectures.
31 //
32 // Vectorizing (perhaps a better name might be "coalescing") loads can have
33 // large performance impacts on GPU kernels, and opportunities for vectorizing
34 // are common in GPU code.  This pass tries very hard to find such
35 // opportunities; its runtime is quadratic in the number of loads in a BB.
36 //
37 // Some CPU architectures, such as ARM, have instructions that load into
38 // multiple scalar registers, similar to a GPU vectorized load.  In theory ARM
39 // could use this pass (with some modifications), but currently it implements
40 // its own pass to do something similar to what we do here.
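//
// As a rough illustration (IR sketched here for explanation, not taken from a
// particular test), the pass turns a pair of consecutive scalar loads such as
//
//   %a = load i32, i32* %ptr
//   %ptr.1 = getelementptr i32, i32* %ptr, i64 1
//   %b = load i32, i32* %ptr.1
//
// into a single wide load plus extracts:
//
//   %cast = bitcast i32* %ptr to <2 x i32>*
//   %vec = load <2 x i32>, <2 x i32>* %cast
//   %a = extractelement <2 x i32> %vec, i32 0
//   %b = extractelement <2 x i32> %vec, i32 1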
41 
42 #include "llvm/ADT/APInt.h"
43 #include "llvm/ADT/ArrayRef.h"
44 #include "llvm/ADT/MapVector.h"
45 #include "llvm/ADT/PostOrderIterator.h"
46 #include "llvm/ADT/STLExtras.h"
47 #include "llvm/ADT/SmallPtrSet.h"
48 #include "llvm/ADT/SmallVector.h"
49 #include "llvm/ADT/Statistic.h"
50 #include "llvm/ADT/iterator_range.h"
51 #include "llvm/Analysis/AliasAnalysis.h"
52 #include "llvm/Analysis/MemoryLocation.h"
53 #include "llvm/Analysis/OrderedBasicBlock.h"
54 #include "llvm/Analysis/ScalarEvolution.h"
55 #include "llvm/Analysis/TargetTransformInfo.h"
56 #include "llvm/Analysis/ValueTracking.h"
57 #include "llvm/Analysis/VectorUtils.h"
58 #include "llvm/IR/Attributes.h"
59 #include "llvm/IR/BasicBlock.h"
60 #include "llvm/IR/Constants.h"
61 #include "llvm/IR/DataLayout.h"
62 #include "llvm/IR/DerivedTypes.h"
63 #include "llvm/IR/Dominators.h"
64 #include "llvm/IR/Function.h"
65 #include "llvm/IR/IRBuilder.h"
66 #include "llvm/IR/InstrTypes.h"
67 #include "llvm/IR/Instruction.h"
68 #include "llvm/IR/Instructions.h"
69 #include "llvm/IR/IntrinsicInst.h"
70 #include "llvm/IR/Module.h"
71 #include "llvm/IR/Type.h"
72 #include "llvm/IR/User.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/Pass.h"
75 #include "llvm/Support/Casting.h"
76 #include "llvm/Support/Debug.h"
77 #include "llvm/Support/KnownBits.h"
78 #include "llvm/Support/MathExtras.h"
79 #include "llvm/Support/raw_ostream.h"
80 #include "llvm/Transforms/Utils/Local.h"
81 #include "llvm/Transforms/Vectorize.h"
82 #include <algorithm>
83 #include <cassert>
84 #include <cstdlib>
85 #include <tuple>
86 #include <utility>
87 
88 using namespace llvm;
89 
90 #define DEBUG_TYPE "load-store-vectorizer"
91 
92 STATISTIC(NumVectorInstructions, "Number of vector accesses generated");
93 STATISTIC(NumScalarsVectorized, "Number of scalar accesses vectorized");
94 
95 // FIXME: Assuming stack alignment of 4 is always good enough
96 static const unsigned StackAdjustedAlignment = 4;
97 
98 namespace {
99 
100 using InstrList = SmallVector<Instruction *, 8>;
101 using InstrListMap = MapVector<Value *, InstrList>;
102 
103 class Vectorizer {
104   Function &F;
105   AliasAnalysis &AA;
106   DominatorTree &DT;
107   ScalarEvolution &SE;
108   TargetTransformInfo &TTI;
109   const DataLayout &DL;
110   IRBuilder<> Builder;
111 
112 public:
113   Vectorizer(Function &F, AliasAnalysis &AA, DominatorTree &DT,
114              ScalarEvolution &SE, TargetTransformInfo &TTI)
115       : F(F), AA(AA), DT(DT), SE(SE), TTI(TTI),
116         DL(F.getParent()->getDataLayout()), Builder(SE.getContext()) {}
117 
118   bool run();
119 
120 private:
121   Value *getPointerOperand(Value *I) const;
122 
123   GetElementPtrInst *getSourceGEP(Value *Src) const;
124 
125   unsigned getPointerAddressSpace(Value *I);
126 
127   unsigned getAlignment(LoadInst *LI) const {
128     unsigned Align = LI->getAlignment();
129     if (Align != 0)
130       return Align;
131 
132     return DL.getABITypeAlignment(LI->getType());
133   }
134 
135   unsigned getAlignment(StoreInst *SI) const {
136     unsigned Align = SI->getAlignment();
137     if (Align != 0)
138       return Align;
139 
140     return DL.getABITypeAlignment(SI->getValueOperand()->getType());
141   }
142 
143   bool isConsecutiveAccess(Value *A, Value *B);
144 
145   /// After vectorization, reorder the instructions that I depends on
146   /// (the instructions defining its operands), to ensure they dominate I.
147   void reorder(Instruction *I);
148 
149   /// Returns the first and the last instructions in Chain.
150   std::pair<BasicBlock::iterator, BasicBlock::iterator>
151   getBoundaryInstrs(ArrayRef<Instruction *> Chain);
152 
153   /// Erases the original instructions after vectorizing.
154   void eraseInstructions(ArrayRef<Instruction *> Chain);
155 
156   /// "Legalize" the vector type that would be produced by combining \p
157   /// ElementSizeBits elements in \p Chain. Break into two pieces such that the
158   /// total size of each piece is 1, 2 or a multiple of 4 bytes. \p Chain is
159   /// expected to have more than 4 elements.
160   std::pair<ArrayRef<Instruction *>, ArrayRef<Instruction *>>
161   splitOddVectorElts(ArrayRef<Instruction *> Chain, unsigned ElementSizeBits);
162 
163   /// Finds the largest prefix of Chain that's vectorizable, checking for
164   /// intervening instructions which may affect the memory accessed by the
165   /// instructions within Chain.
166   ///
167   /// The elements of \p Chain must be all loads or all stores and must be in
168   /// address order.
169   ArrayRef<Instruction *> getVectorizablePrefix(ArrayRef<Instruction *> Chain);
170 
171   /// Collects load and store instructions to vectorize.
172   std::pair<InstrListMap, InstrListMap> collectInstructions(BasicBlock *BB);
173 
  /// Processes the collected instructions in \p Map. The values of \p Map
  /// must be all loads or all stores.
176   bool vectorizeChains(InstrListMap &Map);
177 
  /// Finds loads and stores to consecutive memory addresses and vectorizes
  /// them.
179   bool vectorizeInstructions(ArrayRef<Instruction *> Instrs);
180 
181   /// Vectorizes the load instructions in Chain.
182   bool
183   vectorizeLoadChain(ArrayRef<Instruction *> Chain,
184                      SmallPtrSet<Instruction *, 16> *InstructionsProcessed);
185 
186   /// Vectorizes the store instructions in Chain.
187   bool
188   vectorizeStoreChain(ArrayRef<Instruction *> Chain,
189                       SmallPtrSet<Instruction *, 16> *InstructionsProcessed);
190 
  /// Checks whether this load/store access is misaligned.
192   bool accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
193                           unsigned Alignment);
194 };
195 
196 class LoadStoreVectorizer : public FunctionPass {
197 public:
198   static char ID;
199 
200   LoadStoreVectorizer() : FunctionPass(ID) {
201     initializeLoadStoreVectorizerPass(*PassRegistry::getPassRegistry());
202   }
203 
204   bool runOnFunction(Function &F) override;
205 
206   StringRef getPassName() const override {
207     return "GPU Load and Store Vectorizer";
208   }
209 
210   void getAnalysisUsage(AnalysisUsage &AU) const override {
211     AU.addRequired<AAResultsWrapperPass>();
212     AU.addRequired<ScalarEvolutionWrapperPass>();
213     AU.addRequired<DominatorTreeWrapperPass>();
214     AU.addRequired<TargetTransformInfoWrapperPass>();
215     AU.setPreservesCFG();
216   }
217 };
218 
219 } // end anonymous namespace
220 
221 char LoadStoreVectorizer::ID = 0;
222 
223 INITIALIZE_PASS_BEGIN(LoadStoreVectorizer, DEBUG_TYPE,
224                       "Vectorize load and Store instructions", false, false)
225 INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
226 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
227 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
228 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
229 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
230 INITIALIZE_PASS_END(LoadStoreVectorizer, DEBUG_TYPE,
231                     "Vectorize load and store instructions", false, false)
232 
233 Pass *llvm::createLoadStoreVectorizerPass() {
234   return new LoadStoreVectorizer();
235 }
236 
// The real propagateMetadata expects its operands as Value pointers, but we
// deal in vectors of Instructions.
239 static void propagateMetadata(Instruction *I, ArrayRef<Instruction *> IL) {
240   SmallVector<Value *, 8> VL(IL.begin(), IL.end());
241   propagateMetadata(I, VL);
242 }
243 
244 bool LoadStoreVectorizer::runOnFunction(Function &F) {
245   // Don't vectorize when the attribute NoImplicitFloat is used.
246   if (skipFunction(F) || F.hasFnAttribute(Attribute::NoImplicitFloat))
247     return false;
248 
249   AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
250   DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
251   ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
252   TargetTransformInfo &TTI =
253       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
254 
255   Vectorizer V(F, AA, DT, SE, TTI);
256   return V.run();
257 }
258 
259 // Vectorizer Implementation
260 bool Vectorizer::run() {
261   bool Changed = false;
262 
263   // Scan the blocks in the function in post order.
264   for (BasicBlock *BB : post_order(&F)) {
265     InstrListMap LoadRefs, StoreRefs;
266     std::tie(LoadRefs, StoreRefs) = collectInstructions(BB);
267     Changed |= vectorizeChains(LoadRefs);
268     Changed |= vectorizeChains(StoreRefs);
269   }
270 
271   return Changed;
272 }
273 
274 Value *Vectorizer::getPointerOperand(Value *I) const {
275   if (LoadInst *LI = dyn_cast<LoadInst>(I))
276     return LI->getPointerOperand();
277   if (StoreInst *SI = dyn_cast<StoreInst>(I))
278     return SI->getPointerOperand();
279   return nullptr;
280 }
281 
282 unsigned Vectorizer::getPointerAddressSpace(Value *I) {
283   if (LoadInst *L = dyn_cast<LoadInst>(I))
284     return L->getPointerAddressSpace();
285   if (StoreInst *S = dyn_cast<StoreInst>(I))
286     return S->getPointerAddressSpace();
287   return -1;
288 }
289 
290 GetElementPtrInst *Vectorizer::getSourceGEP(Value *Src) const {
291   // First strip pointer bitcasts. Make sure pointee size is the same with
292   // and without casts.
293   // TODO: a stride set by the add instruction below can match the difference
294   // in pointee type size here. Currently it will not be vectorized.
295   Value *SrcPtr = getPointerOperand(Src);
296   Value *SrcBase = SrcPtr->stripPointerCasts();
297   if (DL.getTypeStoreSize(SrcPtr->getType()->getPointerElementType()) ==
298       DL.getTypeStoreSize(SrcBase->getType()->getPointerElementType()))
299     SrcPtr = SrcBase;
300   return dyn_cast<GetElementPtrInst>(SrcPtr);
301 }
302 
303 // FIXME: Merge with llvm::isConsecutiveAccess
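// Returns true if the memory accessed by B immediately follows the memory
// accessed by A, i.e. B's address equals A's address plus A's access size.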
304 bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
305   Value *PtrA = getPointerOperand(A);
306   Value *PtrB = getPointerOperand(B);
307   unsigned ASA = getPointerAddressSpace(A);
308   unsigned ASB = getPointerAddressSpace(B);
309 
310   // Check that the address spaces match and that the pointers are valid.
311   if (!PtrA || !PtrB || (ASA != ASB))
312     return false;
313 
  // Make sure that A and B are different pointers to types of the same size.
315   unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
316   Type *PtrATy = PtrA->getType()->getPointerElementType();
317   Type *PtrBTy = PtrB->getType()->getPointerElementType();
318   if (PtrA == PtrB ||
319       PtrATy->isVectorTy() != PtrBTy->isVectorTy() ||
320       DL.getTypeStoreSize(PtrATy) != DL.getTypeStoreSize(PtrBTy) ||
321       DL.getTypeStoreSize(PtrATy->getScalarType()) !=
322           DL.getTypeStoreSize(PtrBTy->getScalarType()))
323     return false;
324 
325   APInt Size(PtrBitWidth, DL.getTypeStoreSize(PtrATy));
326 
327   unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
328   APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
329   PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
330   PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
331 
332   APInt OffsetDelta = OffsetB - OffsetA;
333 
334   // Check if they are based on the same pointer. That makes the offsets
335   // sufficient.
336   if (PtrA == PtrB)
337     return OffsetDelta == Size;
338 
  // Compute the base pointer delta needed to make the final delta equal to the
  // size.
341   APInt BaseDelta = Size - OffsetDelta;
342 
343   // Compute the distance with SCEV between the base pointers.
344   const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
345   const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
346   const SCEV *C = SE.getConstant(BaseDelta);
347   const SCEV *X = SE.getAddExpr(PtrSCEVA, C);
348   if (X == PtrSCEVB)
349     return true;
350 
351   // Sometimes even this doesn't work, because SCEV can't always see through
352   // patterns that look like (gep (ext (add (shl X, C1), C2))). Try checking
353   // things the hard way.
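  //
  // Concretely, the code below handles pairs of GEPs whose operands are
  // identical except for the final index, where B's index is A's index plus
  // one, possibly hidden behind a zext/sext (e.g. a final index of
  // (sext i32 %i) for A and (sext i32 (add nsw i32 %i, 1)) for B).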
354 
355   // Look through GEPs after checking they're the same except for the last
356   // index.
357   GetElementPtrInst *GEPA = getSourceGEP(A);
358   GetElementPtrInst *GEPB = getSourceGEP(B);
359   if (!GEPA || !GEPB || GEPA->getNumOperands() != GEPB->getNumOperands())
360     return false;
361   unsigned FinalIndex = GEPA->getNumOperands() - 1;
362   for (unsigned i = 0; i < FinalIndex; i++)
363     if (GEPA->getOperand(i) != GEPB->getOperand(i))
364       return false;
365 
366   Instruction *OpA = dyn_cast<Instruction>(GEPA->getOperand(FinalIndex));
367   Instruction *OpB = dyn_cast<Instruction>(GEPB->getOperand(FinalIndex));
368   if (!OpA || !OpB || OpA->getOpcode() != OpB->getOpcode() ||
369       OpA->getType() != OpB->getType())
370     return false;
371 
372   // Only look through a ZExt/SExt.
373   if (!isa<SExtInst>(OpA) && !isa<ZExtInst>(OpA))
374     return false;
375 
376   bool Signed = isa<SExtInst>(OpA);
377 
378   OpA = dyn_cast<Instruction>(OpA->getOperand(0));
379   OpB = dyn_cast<Instruction>(OpB->getOperand(0));
380   if (!OpA || !OpB || OpA->getType() != OpB->getType())
381     return false;
382 
383   // Now we need to prove that adding 1 to OpA won't overflow.
384   bool Safe = false;
385   // First attempt: if OpB is an add with NSW/NUW, and OpB is 1 added to OpA,
386   // we're okay.
387   if (OpB->getOpcode() == Instruction::Add &&
388       isa<ConstantInt>(OpB->getOperand(1)) &&
389       cast<ConstantInt>(OpB->getOperand(1))->getSExtValue() > 0) {
390     if (Signed)
391       Safe = cast<BinaryOperator>(OpB)->hasNoSignedWrap();
392     else
393       Safe = cast<BinaryOperator>(OpB)->hasNoUnsignedWrap();
394   }
395 
396   unsigned BitWidth = OpA->getType()->getScalarSizeInBits();
397 
398   // Second attempt:
399   // If any bits are known to be zero other than the sign bit in OpA, we can
400   // add 1 to it while guaranteeing no overflow of any sort.
401   if (!Safe) {
402     KnownBits Known(BitWidth);
403     computeKnownBits(OpA, Known, DL, 0, nullptr, OpA, &DT);
404     if (Known.countMaxTrailingOnes() < (BitWidth - 1))
405       Safe = true;
406   }
407 
408   if (!Safe)
409     return false;
410 
411   const SCEV *OffsetSCEVA = SE.getSCEV(OpA);
412   const SCEV *OffsetSCEVB = SE.getSCEV(OpB);
413   const SCEV *One = SE.getConstant(APInt(BitWidth, 1));
414   const SCEV *X2 = SE.getAddExpr(OffsetSCEVA, One);
415   return X2 == OffsetSCEVB;
416 }
417 
418 void Vectorizer::reorder(Instruction *I) {
419   OrderedBasicBlock OBB(I->getParent());
420   SmallPtrSet<Instruction *, 16> InstructionsToMove;
421   SmallVector<Instruction *, 16> Worklist;
422 
423   Worklist.push_back(I);
424   while (!Worklist.empty()) {
425     Instruction *IW = Worklist.pop_back_val();
426     int NumOperands = IW->getNumOperands();
427     for (int i = 0; i < NumOperands; i++) {
428       Instruction *IM = dyn_cast<Instruction>(IW->getOperand(i));
429       if (!IM || IM->getOpcode() == Instruction::PHI)
430         continue;
431 
432       // If IM is in another BB, no need to move it, because this pass only
433       // vectorizes instructions within one BB.
434       if (IM->getParent() != I->getParent())
435         continue;
436 
437       if (!OBB.dominates(IM, I)) {
438         InstructionsToMove.insert(IM);
439         Worklist.push_back(IM);
440       }
441     }
442   }
443 
444   // All instructions to move should follow I. Start from I, not from begin().
445   for (auto BBI = I->getIterator(), E = I->getParent()->end(); BBI != E;
446        ++BBI) {
447     if (!InstructionsToMove.count(&*BBI))
448       continue;
449     Instruction *IM = &*BBI;
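    // Step the iterator back before unlinking IM, so that the ++BBI in the
    // loop header lands on the instruction that originally followed IM.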
450     --BBI;
451     IM->removeFromParent();
452     IM->insertBefore(I);
453   }
454 }
455 
456 std::pair<BasicBlock::iterator, BasicBlock::iterator>
457 Vectorizer::getBoundaryInstrs(ArrayRef<Instruction *> Chain) {
458   Instruction *C0 = Chain[0];
459   BasicBlock::iterator FirstInstr = C0->getIterator();
460   BasicBlock::iterator LastInstr = C0->getIterator();
461 
462   BasicBlock *BB = C0->getParent();
463   unsigned NumFound = 0;
464   for (Instruction &I : *BB) {
465     if (!is_contained(Chain, &I))
466       continue;
467 
468     ++NumFound;
469     if (NumFound == 1) {
470       FirstInstr = I.getIterator();
471     }
472     if (NumFound == Chain.size()) {
473       LastInstr = I.getIterator();
474       break;
475     }
476   }
477 
478   // Range is [first, last).
479   return std::make_pair(FirstInstr, ++LastInstr);
480 }
481 
482 void Vectorizer::eraseInstructions(ArrayRef<Instruction *> Chain) {
483   SmallVector<Instruction *, 16> Instrs;
484   for (Instruction *I : Chain) {
485     Value *PtrOperand = getPointerOperand(I);
486     assert(PtrOperand && "Instruction must have a pointer operand.");
487     Instrs.push_back(I);
488     if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(PtrOperand))
489       Instrs.push_back(GEP);
490   }
491 
492   // Erase instructions.
493   for (Instruction *I : Instrs)
494     if (I->use_empty())
495       I->eraseFromParent();
496 }
497 
498 std::pair<ArrayRef<Instruction *>, ArrayRef<Instruction *>>
499 Vectorizer::splitOddVectorElts(ArrayRef<Instruction *> Chain,
500                                unsigned ElementSizeBits) {
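  // For illustration: a chain of 7 one-byte elements (7 bytes total) is split
  // into pieces of 4 and 3 elements, while a chain of 8 one-byte elements
  // (already a multiple of 4 bytes) is split evenly into pieces of 4 and 4.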
501   unsigned ElementSizeBytes = ElementSizeBits / 8;
502   unsigned SizeBytes = ElementSizeBytes * Chain.size();
503   unsigned NumLeft = (SizeBytes - (SizeBytes % 4)) / ElementSizeBytes;
504   if (NumLeft == Chain.size()) {
505     if ((NumLeft & 1) == 0)
506       NumLeft /= 2; // Split even in half
507     else
508       --NumLeft;    // Split off last element
509   } else if (NumLeft == 0)
510     NumLeft = 1;
511   return std::make_pair(Chain.slice(0, NumLeft), Chain.slice(NumLeft));
512 }
513 
514 ArrayRef<Instruction *>
515 Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
516   // These are in BB order, unlike Chain, which is in address order.
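  // MemoryInstrs collects the loads/stores between the chain's boundary
  // instructions that are not part of Chain; ChainInstrs collects the elements
  // of Chain encountered in that range.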
517   SmallVector<Instruction *, 16> MemoryInstrs;
518   SmallVector<Instruction *, 16> ChainInstrs;
519 
520   bool IsLoadChain = isa<LoadInst>(Chain[0]);
521   DEBUG({
522     for (Instruction *I : Chain) {
523       if (IsLoadChain)
524         assert(isa<LoadInst>(I) &&
525                "All elements of Chain must be loads, or all must be stores.");
526       else
527         assert(isa<StoreInst>(I) &&
528                "All elements of Chain must be loads, or all must be stores.");
529     }
530   });
531 
532   for (Instruction &I : make_range(getBoundaryInstrs(Chain))) {
533     if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
534       if (!is_contained(Chain, &I))
535         MemoryInstrs.push_back(&I);
536       else
537         ChainInstrs.push_back(&I);
538     } else if (isa<IntrinsicInst>(&I) &&
539                cast<IntrinsicInst>(&I)->getIntrinsicID() ==
540                    Intrinsic::sideeffect) {
541       // Ignore llvm.sideeffect calls.
542     } else if (IsLoadChain && (I.mayWriteToMemory() || I.mayThrow())) {
543       DEBUG(dbgs() << "LSV: Found may-write/throw operation: " << I << '\n');
544       break;
545     } else if (!IsLoadChain && (I.mayReadOrWriteMemory() || I.mayThrow())) {
546       DEBUG(dbgs() << "LSV: Found may-read/write/throw operation: " << I
547                    << '\n');
548       break;
549     }
550   }
551 
552   OrderedBasicBlock OBB(Chain[0]->getParent());
553 
554   // Loop until we find an instruction in ChainInstrs that we can't vectorize.
555   unsigned ChainInstrIdx = 0;
556   Instruction *BarrierMemoryInstr = nullptr;
557 
558   for (unsigned E = ChainInstrs.size(); ChainInstrIdx < E; ++ChainInstrIdx) {
559     Instruction *ChainInstr = ChainInstrs[ChainInstrIdx];
560 
561     // If a barrier memory instruction was found, chain instructions that follow
562     // will not be added to the valid prefix.
563     if (BarrierMemoryInstr && OBB.dominates(BarrierMemoryInstr, ChainInstr))
564       break;
565 
566     // Check (in BB order) if any instruction prevents ChainInstr from being
567     // vectorized. Find and store the first such "conflicting" instruction.
568     for (Instruction *MemInstr : MemoryInstrs) {
569       // If a barrier memory instruction was found, do not check past it.
570       if (BarrierMemoryInstr && OBB.dominates(BarrierMemoryInstr, MemInstr))
571         break;
572 
573       if (isa<LoadInst>(MemInstr) && isa<LoadInst>(ChainInstr))
574         continue;
575 
576       // We can ignore the alias as long as the load comes before the store,
577       // because that means we won't be moving the load past the store to
578       // vectorize it (the vectorized load is inserted at the location of the
579       // first load in the chain).
580       if (isa<StoreInst>(MemInstr) && isa<LoadInst>(ChainInstr) &&
581           OBB.dominates(ChainInstr, MemInstr))
582         continue;
583 
584       // Same case, but in reverse.
585       if (isa<LoadInst>(MemInstr) && isa<StoreInst>(ChainInstr) &&
586           OBB.dominates(MemInstr, ChainInstr))
587         continue;
588 
589       if (!AA.isNoAlias(MemoryLocation::get(MemInstr),
590                         MemoryLocation::get(ChainInstr))) {
591         DEBUG({
592           dbgs() << "LSV: Found alias:\n"
593                     "  Aliasing instruction and pointer:\n"
594                  << "  " << *MemInstr << '\n'
595                  << "  " << *getPointerOperand(MemInstr) << '\n'
596                  << "  Aliased instruction and pointer:\n"
597                  << "  " << *ChainInstr << '\n'
598                  << "  " << *getPointerOperand(ChainInstr) << '\n';
599         });
600         // Save this aliasing memory instruction as a barrier, but allow other
601         // instructions that precede the barrier to be vectorized with this one.
602         BarrierMemoryInstr = MemInstr;
603         break;
604       }
605     }
606     // Continue the search only for store chains, since vectorizing stores that
607     // precede an aliasing load is valid. Conversely, vectorizing loads is valid
608     // up to an aliasing store, but should not pull loads from further down in
609     // the basic block.
610     if (IsLoadChain && BarrierMemoryInstr) {
611       // The BarrierMemoryInstr is a store that precedes ChainInstr.
612       assert(OBB.dominates(BarrierMemoryInstr, ChainInstr));
613       break;
614     }
615   }
616 
617   // Find the largest prefix of Chain whose elements are all in
618   // ChainInstrs[0, ChainInstrIdx).  This is the largest vectorizable prefix of
619   // Chain.  (Recall that Chain is in address order, but ChainInstrs is in BB
620   // order.)
621   SmallPtrSet<Instruction *, 8> VectorizableChainInstrs(
622       ChainInstrs.begin(), ChainInstrs.begin() + ChainInstrIdx);
623   unsigned ChainIdx = 0;
624   for (unsigned ChainLen = Chain.size(); ChainIdx < ChainLen; ++ChainIdx) {
625     if (!VectorizableChainInstrs.count(Chain[ChainIdx]))
626       break;
627   }
628   return Chain.slice(0, ChainIdx);
629 }
630 
631 std::pair<InstrListMap, InstrListMap>
632 Vectorizer::collectInstructions(BasicBlock *BB) {
633   InstrListMap LoadRefs;
634   InstrListMap StoreRefs;
635 
636   for (Instruction &I : *BB) {
637     if (!I.mayReadOrWriteMemory())
638       continue;
639 
640     if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
641       if (!LI->isSimple())
642         continue;
643 
644       // Skip if it's not legal.
645       if (!TTI.isLegalToVectorizeLoad(LI))
646         continue;
647 
648       Type *Ty = LI->getType();
649       if (!VectorType::isValidElementType(Ty->getScalarType()))
650         continue;
651 
652       // Skip weird non-byte sizes. They probably aren't worth the effort of
653       // handling correctly.
654       unsigned TySize = DL.getTypeSizeInBits(Ty);
655       if ((TySize % 8) != 0)
656         continue;
657 
      // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain
      // functions currently use an integer type for the vectorized load/store
      // and do not support casting between the integer type and a vector of
      // pointers (e.g. i64 to <2 x i16*>).
662       if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy())
663         continue;
664 
665       Value *Ptr = LI->getPointerOperand();
666       unsigned AS = Ptr->getType()->getPointerAddressSpace();
667       unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
668 
669       unsigned VF = VecRegSize / TySize;
670       VectorType *VecTy = dyn_cast<VectorType>(Ty);
671 
672       // No point in looking at these if they're too big to vectorize.
673       if (TySize > VecRegSize / 2 ||
674           (VecTy && TTI.getLoadVectorFactor(VF, TySize, TySize / 8, VecTy) == 0))
675         continue;
676 
677       // Make sure all the users of a vector are constant-index extracts.
678       if (isa<VectorType>(Ty) && !llvm::all_of(LI->users(), [](const User *U) {
679             const ExtractElementInst *EEI = dyn_cast<ExtractElementInst>(U);
680             return EEI && isa<ConstantInt>(EEI->getOperand(1));
681           }))
682         continue;
683 
684       // Save the load locations.
685       Value *ObjPtr = GetUnderlyingObject(Ptr, DL);
686       LoadRefs[ObjPtr].push_back(LI);
687     } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {
688       if (!SI->isSimple())
689         continue;
690 
691       // Skip if it's not legal.
692       if (!TTI.isLegalToVectorizeStore(SI))
693         continue;
694 
695       Type *Ty = SI->getValueOperand()->getType();
696       if (!VectorType::isValidElementType(Ty->getScalarType()))
697         continue;
698 
      // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain
      // functions currently use an integer type for the vectorized load/store
      // and do not support casting between the integer type and a vector of
      // pointers (e.g. i64 to <2 x i16*>).
703       if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy())
704         continue;
705 
706       // Skip weird non-byte sizes. They probably aren't worth the effort of
707       // handling correctly.
708       unsigned TySize = DL.getTypeSizeInBits(Ty);
709       if ((TySize % 8) != 0)
710         continue;
711 
712       Value *Ptr = SI->getPointerOperand();
713       unsigned AS = Ptr->getType()->getPointerAddressSpace();
714       unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
715 
716       unsigned VF = VecRegSize / TySize;
717       VectorType *VecTy = dyn_cast<VectorType>(Ty);
718 
719       // No point in looking at these if they're too big to vectorize.
720       if (TySize > VecRegSize / 2 ||
721           (VecTy && TTI.getStoreVectorFactor(VF, TySize, TySize / 8, VecTy) == 0))
722         continue;
723 
724       if (isa<VectorType>(Ty) && !llvm::all_of(SI->users(), [](const User *U) {
725             const ExtractElementInst *EEI = dyn_cast<ExtractElementInst>(U);
726             return EEI && isa<ConstantInt>(EEI->getOperand(1));
727           }))
728         continue;
729 
730       // Save store location.
731       Value *ObjPtr = GetUnderlyingObject(Ptr, DL);
732       StoreRefs[ObjPtr].push_back(SI);
733     }
734   }
735 
736   return {LoadRefs, StoreRefs};
737 }
738 
739 bool Vectorizer::vectorizeChains(InstrListMap &Map) {
740   bool Changed = false;
741 
742   for (const std::pair<Value *, InstrList> &Chain : Map) {
743     unsigned Size = Chain.second.size();
744     if (Size < 2)
745       continue;
746 
747     DEBUG(dbgs() << "LSV: Analyzing a chain of length " << Size << ".\n");
748 
    // Process the chain in chunks of 64 instructions.
750     for (unsigned CI = 0, CE = Size; CI < CE; CI += 64) {
751       unsigned Len = std::min<unsigned>(CE - CI, 64);
752       ArrayRef<Instruction *> Chunk(&Chain.second[CI], Len);
753       Changed |= vectorizeInstructions(Chunk);
754     }
755   }
756 
757   return Changed;
758 }
759 
760 bool Vectorizer::vectorizeInstructions(ArrayRef<Instruction *> Instrs) {
761   DEBUG(dbgs() << "LSV: Vectorizing " << Instrs.size() << " instructions.\n");
762   SmallVector<int, 16> Heads, Tails;
763   int ConsecutiveChain[64];
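  // ConsecutiveChain[i] == j means that Instrs[j] accesses the memory
  // immediately following the memory accessed by Instrs[i]; -1 means no such
  // successor has been found yet.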
764 
765   // Do a quadratic search on all of the given loads/stores and find all of the
766   // pairs of loads/stores that follow each other.
767   for (int i = 0, e = Instrs.size(); i < e; ++i) {
768     ConsecutiveChain[i] = -1;
769     for (int j = e - 1; j >= 0; --j) {
770       if (i == j)
771         continue;
772 
773       if (isConsecutiveAccess(Instrs[i], Instrs[j])) {
774         if (ConsecutiveChain[i] != -1) {
775           int CurDistance = std::abs(ConsecutiveChain[i] - i);
776           int NewDistance = std::abs(ConsecutiveChain[i] - j);
777           if (j < i || NewDistance > CurDistance)
778             continue; // Should not insert.
779         }
780 
781         Tails.push_back(j);
782         Heads.push_back(i);
783         ConsecutiveChain[i] = j;
784       }
785     }
786   }
787 
788   bool Changed = false;
789   SmallPtrSet<Instruction *, 16> InstructionsProcessed;
790 
791   for (int Head : Heads) {
792     if (InstructionsProcessed.count(Instrs[Head]))
793       continue;
794     bool LongerChainExists = false;
795     for (unsigned TIt = 0; TIt < Tails.size(); TIt++)
796       if (Head == Tails[TIt] &&
797           !InstructionsProcessed.count(Instrs[Heads[TIt]])) {
798         LongerChainExists = true;
799         break;
800       }
801     if (LongerChainExists)
802       continue;
803 
804     // We found an instr that starts a chain. Now follow the chain and try to
805     // vectorize it.
806     SmallVector<Instruction *, 16> Operands;
807     int I = Head;
808     while (I != -1 && (is_contained(Tails, I) || is_contained(Heads, I))) {
809       if (InstructionsProcessed.count(Instrs[I]))
810         break;
811 
812       Operands.push_back(Instrs[I]);
813       I = ConsecutiveChain[I];
814     }
815 
816     bool Vectorized = false;
817     if (isa<LoadInst>(*Operands.begin()))
818       Vectorized = vectorizeLoadChain(Operands, &InstructionsProcessed);
819     else
820       Vectorized = vectorizeStoreChain(Operands, &InstructionsProcessed);
821 
822     Changed |= Vectorized;
823   }
824 
825   return Changed;
826 }
827 
828 bool Vectorizer::vectorizeStoreChain(
829     ArrayRef<Instruction *> Chain,
830     SmallPtrSet<Instruction *, 16> *InstructionsProcessed) {
831   StoreInst *S0 = cast<StoreInst>(Chain[0]);
832 
  // If the chain contains an integer or pointer value (or a vector of them),
  // default to an integer type for the whole store.
834   Type *StoreTy;
835   for (Instruction *I : Chain) {
836     StoreTy = cast<StoreInst>(I)->getValueOperand()->getType();
837     if (StoreTy->isIntOrIntVectorTy())
838       break;
839 
840     if (StoreTy->isPtrOrPtrVectorTy()) {
841       StoreTy = Type::getIntNTy(F.getParent()->getContext(),
842                                 DL.getTypeSizeInBits(StoreTy));
843       break;
844     }
845   }
846 
847   unsigned Sz = DL.getTypeSizeInBits(StoreTy);
848   unsigned AS = S0->getPointerAddressSpace();
849   unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
850   unsigned VF = VecRegSize / Sz;
851   unsigned ChainSize = Chain.size();
852   unsigned Alignment = getAlignment(S0);
853 
854   if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
855     InstructionsProcessed->insert(Chain.begin(), Chain.end());
856     return false;
857   }
858 
859   ArrayRef<Instruction *> NewChain = getVectorizablePrefix(Chain);
860   if (NewChain.empty()) {
861     // No vectorization possible.
862     InstructionsProcessed->insert(Chain.begin(), Chain.end());
863     return false;
864   }
865   if (NewChain.size() == 1) {
866     // Failed after the first instruction. Discard it and try the smaller chain.
867     InstructionsProcessed->insert(NewChain.front());
868     return false;
869   }
870 
871   // Update Chain to the valid vectorizable subchain.
872   Chain = NewChain;
873   ChainSize = Chain.size();
874 
875   // Check if it's legal to vectorize this chain. If not, split the chain and
876   // try again.
877   unsigned EltSzInBytes = Sz / 8;
878   unsigned SzInBytes = EltSzInBytes * ChainSize;
879   if (!TTI.isLegalToVectorizeStoreChain(SzInBytes, Alignment, AS)) {
880     auto Chains = splitOddVectorElts(Chain, Sz);
881     return vectorizeStoreChain(Chains.first, InstructionsProcessed) |
882            vectorizeStoreChain(Chains.second, InstructionsProcessed);
883   }
884 
885   VectorType *VecTy;
886   VectorType *VecStoreTy = dyn_cast<VectorType>(StoreTy);
887   if (VecStoreTy)
888     VecTy = VectorType::get(StoreTy->getScalarType(),
889                             Chain.size() * VecStoreTy->getNumElements());
890   else
891     VecTy = VectorType::get(StoreTy, Chain.size());
892 
893   // If it's more than the max vector size or the target has a better
894   // vector factor, break it into two pieces.
895   unsigned TargetVF = TTI.getStoreVectorFactor(VF, Sz, SzInBytes, VecTy);
896   if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) {
897     DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor."
898                     " Creating two separate arrays.\n");
899     return vectorizeStoreChain(Chain.slice(0, TargetVF),
900                                InstructionsProcessed) |
901            vectorizeStoreChain(Chain.slice(TargetVF), InstructionsProcessed);
902   }
903 
904   DEBUG({
905     dbgs() << "LSV: Stores to vectorize:\n";
906     for (Instruction *I : Chain)
907       dbgs() << "  " << *I << "\n";
908   });
909 
910   // We won't try again to vectorize the elements of the chain, regardless of
911   // whether we succeed below.
912   InstructionsProcessed->insert(Chain.begin(), Chain.end());
913 
914   // If the store is going to be misaligned, don't vectorize it.
915   if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
916     if (S0->getPointerAddressSpace() != 0)
917       return false;
918 
919     unsigned NewAlign = getOrEnforceKnownAlignment(S0->getPointerOperand(),
920                                                    StackAdjustedAlignment,
921                                                    DL, S0, nullptr, &DT);
922     if (NewAlign < StackAdjustedAlignment)
923       return false;
924   }
925 
926   BasicBlock::iterator First, Last;
927   std::tie(First, Last) = getBoundaryInstrs(Chain);
928   Builder.SetInsertPoint(&*Last);
929 
930   Value *Vec = UndefValue::get(VecTy);
931 
932   if (VecStoreTy) {
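    // The stored values are themselves vectors; flatten them so that element J
    // of the I'th store becomes element I * VecWidth + J of the wide vector.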
933     unsigned VecWidth = VecStoreTy->getNumElements();
934     for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
935       StoreInst *Store = cast<StoreInst>(Chain[I]);
936       for (unsigned J = 0, NE = VecStoreTy->getNumElements(); J != NE; ++J) {
937         unsigned NewIdx = J + I * VecWidth;
938         Value *Extract = Builder.CreateExtractElement(Store->getValueOperand(),
939                                                       Builder.getInt32(J));
940         if (Extract->getType() != StoreTy->getScalarType())
941           Extract = Builder.CreateBitCast(Extract, StoreTy->getScalarType());
942 
943         Value *Insert =
944             Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(NewIdx));
945         Vec = Insert;
946       }
947     }
948   } else {
949     for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
950       StoreInst *Store = cast<StoreInst>(Chain[I]);
951       Value *Extract = Store->getValueOperand();
952       if (Extract->getType() != StoreTy->getScalarType())
953         Extract =
954             Builder.CreateBitOrPointerCast(Extract, StoreTy->getScalarType());
955 
956       Value *Insert =
957           Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(I));
958       Vec = Insert;
959     }
960   }
961 
962   // This cast is safe because Builder.CreateStore() always creates a bona fide
963   // StoreInst.
964   StoreInst *SI = cast<StoreInst>(
965       Builder.CreateStore(Vec, Builder.CreateBitCast(S0->getPointerOperand(),
966                                                      VecTy->getPointerTo(AS))));
967   propagateMetadata(SI, Chain);
968   SI->setAlignment(Alignment);
969 
970   eraseInstructions(Chain);
971   ++NumVectorInstructions;
972   NumScalarsVectorized += Chain.size();
973   return true;
974 }
975 
976 bool Vectorizer::vectorizeLoadChain(
977     ArrayRef<Instruction *> Chain,
978     SmallPtrSet<Instruction *, 16> *InstructionsProcessed) {
979   LoadInst *L0 = cast<LoadInst>(Chain[0]);
980 
  // If the chain contains an integer or pointer load (or a vector of them),
  // default to an integer type for the whole load.
982   Type *LoadTy;
983   for (const auto &V : Chain) {
984     LoadTy = cast<LoadInst>(V)->getType();
985     if (LoadTy->isIntOrIntVectorTy())
986       break;
987 
988     if (LoadTy->isPtrOrPtrVectorTy()) {
989       LoadTy = Type::getIntNTy(F.getParent()->getContext(),
990                                DL.getTypeSizeInBits(LoadTy));
991       break;
992     }
993   }
994 
995   unsigned Sz = DL.getTypeSizeInBits(LoadTy);
996   unsigned AS = L0->getPointerAddressSpace();
997   unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
998   unsigned VF = VecRegSize / Sz;
999   unsigned ChainSize = Chain.size();
1000   unsigned Alignment = getAlignment(L0);
1001 
1002   if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
1003     InstructionsProcessed->insert(Chain.begin(), Chain.end());
1004     return false;
1005   }
1006 
1007   ArrayRef<Instruction *> NewChain = getVectorizablePrefix(Chain);
1008   if (NewChain.empty()) {
1009     // No vectorization possible.
1010     InstructionsProcessed->insert(Chain.begin(), Chain.end());
1011     return false;
1012   }
1013   if (NewChain.size() == 1) {
1014     // Failed after the first instruction. Discard it and try the smaller chain.
1015     InstructionsProcessed->insert(NewChain.front());
1016     return false;
1017   }
1018 
1019   // Update Chain to the valid vectorizable subchain.
1020   Chain = NewChain;
1021   ChainSize = Chain.size();
1022 
1023   // Check if it's legal to vectorize this chain. If not, split the chain and
1024   // try again.
1025   unsigned EltSzInBytes = Sz / 8;
1026   unsigned SzInBytes = EltSzInBytes * ChainSize;
1027   if (!TTI.isLegalToVectorizeLoadChain(SzInBytes, Alignment, AS)) {
1028     auto Chains = splitOddVectorElts(Chain, Sz);
1029     return vectorizeLoadChain(Chains.first, InstructionsProcessed) |
1030            vectorizeLoadChain(Chains.second, InstructionsProcessed);
1031   }
1032 
1033   VectorType *VecTy;
1034   VectorType *VecLoadTy = dyn_cast<VectorType>(LoadTy);
1035   if (VecLoadTy)
1036     VecTy = VectorType::get(LoadTy->getScalarType(),
1037                             Chain.size() * VecLoadTy->getNumElements());
1038   else
1039     VecTy = VectorType::get(LoadTy, Chain.size());
1040 
1041   // If it's more than the max vector size or the target has a better
1042   // vector factor, break it into two pieces.
1043   unsigned TargetVF = TTI.getLoadVectorFactor(VF, Sz, SzInBytes, VecTy);
1044   if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) {
1045     DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor."
1046                     " Creating two separate arrays.\n");
1047     return vectorizeLoadChain(Chain.slice(0, TargetVF), InstructionsProcessed) |
1048            vectorizeLoadChain(Chain.slice(TargetVF), InstructionsProcessed);
1049   }
1050 
1051   // We won't try again to vectorize the elements of the chain, regardless of
1052   // whether we succeed below.
1053   InstructionsProcessed->insert(Chain.begin(), Chain.end());
1054 
1055   // If the load is going to be misaligned, don't vectorize it.
1056   if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
1057     if (L0->getPointerAddressSpace() != 0)
1058       return false;
1059 
1060     unsigned NewAlign = getOrEnforceKnownAlignment(L0->getPointerOperand(),
1061                                                    StackAdjustedAlignment,
1062                                                    DL, L0, nullptr, &DT);
1063     if (NewAlign < StackAdjustedAlignment)
1064       return false;
1065 
1066     Alignment = NewAlign;
1067   }
1068 
1069   DEBUG({
1070     dbgs() << "LSV: Loads to vectorize:\n";
1071     for (Instruction *I : Chain)
1072       I->dump();
1073   });
1074 
1075   // getVectorizablePrefix already computed getBoundaryInstrs.  The value of
1076   // Last may have changed since then, but the value of First won't have.  If it
1077   // matters, we could compute getBoundaryInstrs only once and reuse it here.
1078   BasicBlock::iterator First, Last;
1079   std::tie(First, Last) = getBoundaryInstrs(Chain);
1080   Builder.SetInsertPoint(&*First);
1081 
1082   Value *Bitcast =
1083       Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS));
1084   // This cast is safe because Builder.CreateLoad always creates a bona fide
1085   // LoadInst.
1086   LoadInst *LI = cast<LoadInst>(Builder.CreateLoad(Bitcast));
1087   propagateMetadata(LI, Chain);
1088   LI->setAlignment(Alignment);
1089 
1090   if (VecLoadTy) {
1091     SmallVector<Instruction *, 16> InstrsToErase;
1092 
1093     unsigned VecWidth = VecLoadTy->getNumElements();
1094     for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
1095       for (auto Use : Chain[I]->users()) {
1096         // All users of vector loads are ExtractElement instructions with
1097         // constant indices, otherwise we would have bailed before now.
1098         Instruction *UI = cast<Instruction>(Use);
1099         unsigned Idx = cast<ConstantInt>(UI->getOperand(1))->getZExtValue();
1100         unsigned NewIdx = Idx + I * VecWidth;
1101         Value *V = Builder.CreateExtractElement(LI, Builder.getInt32(NewIdx),
1102                                                 UI->getName());
1103         if (V->getType() != UI->getType())
1104           V = Builder.CreateBitCast(V, UI->getType());
1105 
1106         // Replace the old instruction.
1107         UI->replaceAllUsesWith(V);
1108         InstrsToErase.push_back(UI);
1109       }
1110     }
1111 
    // Bitcast might not be an Instruction if the pointer operand is a
    // constant, in which case the cast folds to a ConstantExpr and there is no
    // need to reorder anything.
1114     if (Instruction *BitcastInst = dyn_cast<Instruction>(Bitcast))
1115       reorder(BitcastInst);
1116 
1117     for (auto I : InstrsToErase)
1118       I->eraseFromParent();
1119   } else {
1120     for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
1121       Value *CV = Chain[I];
1122       Value *V =
1123           Builder.CreateExtractElement(LI, Builder.getInt32(I), CV->getName());
1124       if (V->getType() != CV->getType()) {
1125         V = Builder.CreateBitOrPointerCast(V, CV->getType());
1126       }
1127 
1128       // Replace the old instruction.
1129       CV->replaceAllUsesWith(V);
1130     }
1131 
1132     if (Instruction *BitcastInst = dyn_cast<Instruction>(Bitcast))
1133       reorder(BitcastInst);
1134   }
1135 
1136   eraseInstructions(Chain);
1137 
1138   ++NumVectorInstructions;
1139   NumScalarsVectorized += Chain.size();
1140   return true;
1141 }
1142 
1143 bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
1144                                     unsigned Alignment) {
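  // An access whose alignment is a multiple of its size is treated as aligned;
  // otherwise ask the target whether a misaligned access is both allowed and
  // fast.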
1145   if (Alignment % SzInBytes == 0)
1146     return false;
1147 
1148   bool Fast = false;
1149   bool Allows = TTI.allowsMisalignedMemoryAccesses(F.getParent()->getContext(),
1150                                                    SzInBytes * 8, AddressSpace,
1151                                                    Alignment, &Fast);
1152   DEBUG(dbgs() << "LSV: Target said misaligned is allowed? " << Allows
1153                << " and fast? " << Fast << "\n";);
1154   return !Allows || !Fast;
1155 }
1156