//===- Scalarizer.cpp - Scalarize vector operations -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass converts vector operations into scalar operations, in order
// to expose optimization opportunities on the individual scalar operations.
// It is mainly intended for targets that do not have vector units, but it
// may also be useful for revectorizing code to different vector widths.
//
//===----------------------------------------------------------------------===//
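
// As an illustrative sketch (not taken from an actual test case), a vector
// addition such as
//
//   %res = fadd <2 x float> %a, %b
//
// is split into one scalar fadd per lane, with the full vector value
// recreated by insertelement instructions only where it is still needed:
//
//   %a.i0 = extractelement <2 x float> %a, i32 0
//   %b.i0 = extractelement <2 x float> %b, i32 0
//   %res.i0 = fadd float %a.i0, %b.i0
//   ...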

#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Options.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/Scalarizer.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "scalarizer"

// This is disabled by default because having separate loads and stores
// makes it more likely that the -combiner-alias-analysis limits will be
// reached.
static cl::opt<bool>
    ScalarizeLoadStore("scalarize-load-store", cl::init(false), cl::Hidden,
                       cl::desc("Allow the scalarizer pass to scalarize "
                                "loads and stores"));

namespace {

// Used to store the scattered form of a vector.
using ValueVector = SmallVector<Value *, 8>;

// Used to map a vector Value to its scattered form. We use std::map
// because we want iterators to persist across insertion and because the
// values are relatively large.
using ScatterMap = std::map<Value *, ValueVector>;

// Lists Instructions that have been replaced with scalar implementations,
// along with a pointer to their scattered forms.
using GatherList = SmallVector<std::pair<Instruction *, ValueVector *>, 16>;

// Provides a very limited vector-like interface for lazily accessing one
// component of a scattered vector or vector pointer.
class Scatterer {
public:
  Scatterer() = default;

  // Scatter V into Size components. If new instructions are needed,
  // insert them before BBI in BB. If Cache is nonnull, use it to cache
  // the results.
  Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
            ValueVector *cachePtr = nullptr);

  // Return component I, creating a new Value for it if necessary.
  Value *operator[](unsigned I);

  // Return the number of components.
  unsigned size() const { return Size; }

private:
  BasicBlock *BB;
  BasicBlock::iterator BBI;
  Value *V;
  ValueVector *CachePtr;
  PointerType *PtrTy;
  ValueVector Tmp;
  unsigned Size;
};
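
// A Scatterer is normally obtained via ScalarizerVisitor::scatter() and
// indexed lazily: a component is only materialized (e.g. as an
// extractelement) the first time it is requested. A rough usage sketch
// (names illustrative only):
//
//   Scatterer Op0 = scatter(&I, I.getOperand(0));
//   Value *Lane2 = Op0[2];       // creates "<name>.i2" on first access
//   unsigned Lanes = Op0.size(); // number of components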

// FCmpSplitter(FCI)(Builder, X, Y, Name) uses Builder to create an FCmp
// called Name that compares X and Y in the same way as FCI.
struct FCmpSplitter {
  FCmpSplitter(FCmpInst &fci) : FCI(fci) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateFCmp(FCI.getPredicate(), Op0, Op1, Name);
  }

  FCmpInst &FCI;
};

// ICmpSplitter(ICI)(Builder, X, Y, Name) uses Builder to create an ICmp
// called Name that compares X and Y in the same way as ICI.
struct ICmpSplitter {
  ICmpSplitter(ICmpInst &ici) : ICI(ici) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateICmp(ICI.getPredicate(), Op0, Op1, Name);
  }

  ICmpInst &ICI;
};

// BinarySplitter(BO)(Builder, X, Y, Name) uses Builder to create
// a binary operator like BO called Name with operands X and Y.
struct BinarySplitter {
  BinarySplitter(BinaryOperator &bo) : BO(bo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateBinOp(BO.getOpcode(), Op0, Op1, Name);
  }

  BinaryOperator &BO;
};
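
// Each splitter above is a functor handed to splitBinary() below, which
// invokes it once per vector lane, e.g. splitBinary(BO, BinarySplitter(BO)).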

// Information about a load or store that we're scalarizing.
struct VectorLayout {
  VectorLayout() = default;

  // Return the alignment of element I.
  uint64_t getElemAlign(unsigned I) {
    return MinAlign(VecAlign, I * ElemSize);
  }

  // The type of the vector.
  VectorType *VecTy = nullptr;

  // The type of each element.
  Type *ElemTy = nullptr;

  // The alignment of the vector.
  uint64_t VecAlign = 0;

  // The size of each element.
  uint64_t ElemSize = 0;
};
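
// For example (illustrative numbers), a <4 x i32> with VecAlign 16 and
// ElemSize 4 yields element alignments MinAlign(16, 0) = 16,
// MinAlign(16, 4) = 4, MinAlign(16, 8) = 8 and MinAlign(16, 12) = 4.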

class ScalarizerVisitor : public InstVisitor<ScalarizerVisitor, bool> {
public:
  ScalarizerVisitor(unsigned ParallelLoopAccessMDKind)
      : ParallelLoopAccessMDKind(ParallelLoopAccessMDKind) {
  }

  bool visit(Function &F);

  // InstVisitor methods. They return true if the instruction was scalarized,
  // false if nothing changed.
  bool visitInstruction(Instruction &I) { return false; }
  bool visitSelectInst(SelectInst &SI);
  bool visitICmpInst(ICmpInst &ICI);
  bool visitFCmpInst(FCmpInst &FCI);
  bool visitBinaryOperator(BinaryOperator &BO);
  bool visitGetElementPtrInst(GetElementPtrInst &GEPI);
  bool visitCastInst(CastInst &CI);
  bool visitBitCastInst(BitCastInst &BCI);
  bool visitShuffleVectorInst(ShuffleVectorInst &SVI);
  bool visitPHINode(PHINode &PHI);
  bool visitLoadInst(LoadInst &LI);
  bool visitStoreInst(StoreInst &SI);
  bool visitCallInst(CallInst &CI);

private:
  Scatterer scatter(Instruction *Point, Value *V);
  void gather(Instruction *Op, const ValueVector &CV);
  bool canTransferMetadata(unsigned Kind);
  void transferMetadata(Instruction *Op, const ValueVector &CV);
  bool getVectorLayout(Type *Ty, unsigned Alignment, VectorLayout &Layout,
                       const DataLayout &DL);
  bool finish();

  template<typename T> bool splitBinary(Instruction &, const T &);

  bool splitCall(CallInst &CI);

  ScatterMap Scattered;
  GatherList Gathered;

  unsigned ParallelLoopAccessMDKind;
};

class ScalarizerLegacyPass : public FunctionPass {
public:
  static char ID;

  ScalarizerLegacyPass() : FunctionPass(ID) {
    initializeScalarizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;
};

} // end anonymous namespace

char ScalarizerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(ScalarizerLegacyPass, "scalarizer",
                      "Scalarize vector operations", false, false)
INITIALIZE_PASS_END(ScalarizerLegacyPass, "scalarizer",
                    "Scalarize vector operations", false, false)

Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
                     ValueVector *cachePtr)
    : BB(bb), BBI(bbi), V(v), CachePtr(cachePtr) {
  Type *Ty = V->getType();
  PtrTy = dyn_cast<PointerType>(Ty);
  if (PtrTy)
    Ty = PtrTy->getElementType();
  Size = Ty->getVectorNumElements();
  if (!CachePtr)
    Tmp.resize(Size, nullptr);
  else if (CachePtr->empty())
    CachePtr->resize(Size, nullptr);
  else
    assert(Size == CachePtr->size() && "Inconsistent vector sizes");
}

// Return component I, creating a new Value for it if necessary.
Value *Scatterer::operator[](unsigned I) {
  ValueVector &CV = (CachePtr ? *CachePtr : Tmp);
  // Try to reuse a previous value.
  if (CV[I])
    return CV[I];
  IRBuilder<> Builder(BB, BBI);
  if (PtrTy) {
    if (!CV[0]) {
      Type *Ty =
          PointerType::get(PtrTy->getElementType()->getVectorElementType(),
                           PtrTy->getAddressSpace());
      CV[0] = Builder.CreateBitCast(V, Ty, V->getName() + ".i0");
    }
    if (I != 0)
      CV[I] = Builder.CreateConstGEP1_32(nullptr, CV[0], I,
                                         V->getName() + ".i" + Twine(I));
  } else {
    // Search through a chain of InsertElementInsts looking for element I.
    // Record other elements in the cache. The new V is still suitable
    // for all uncached indices.
    while (true) {
      InsertElementInst *Insert = dyn_cast<InsertElementInst>(V);
      if (!Insert)
        break;
      ConstantInt *Idx = dyn_cast<ConstantInt>(Insert->getOperand(2));
      if (!Idx)
        break;
      unsigned J = Idx->getZExtValue();
      V = Insert->getOperand(0);
      if (I == J) {
        CV[J] = Insert->getOperand(1);
        return CV[J];
      } else if (!CV[J]) {
        // Only cache the first entry we find for each index we're not
        // actively searching for. This prevents us from going too far up
        // the chain and caching incorrect entries.
        CV[J] = Insert->getOperand(1);
      }
    }
    CV[I] = Builder.CreateExtractElement(V, Builder.getInt32(I),
                                         V->getName() + ".i" + Twine(I));
  }
  return CV[I];
}

bool ScalarizerLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind);
  return Impl.visit(F);
}

FunctionPass *llvm::createScalarizerPass() {
  return new ScalarizerLegacyPass();
}

bool ScalarizerVisitor::visit(Function &F) {
  assert(Gathered.empty() && Scattered.empty());

  // To ensure we replace gathered components correctly we need to do an
  // ordered traversal of the basic blocks in the function.
  ReversePostOrderTraversal<BasicBlock *> RPOT(&F.getEntryBlock());
  for (BasicBlock *BB : RPOT) {
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
      Instruction *I = &*II;
      bool Done = InstVisitor::visit(I);
      ++II;
      if (Done && I->getType()->isVoidTy())
        I->eraseFromParent();
    }
  }
  return finish();
}

// Return a scattered form of V that can be accessed by Point. V must be a
// vector or a pointer to a vector.
Scatterer ScalarizerVisitor::scatter(Instruction *Point, Value *V) {
  if (Argument *VArg = dyn_cast<Argument>(V)) {
    // Put the scattered form of arguments in the entry block,
    // so that it can be used everywhere.
    Function *F = VArg->getParent();
    BasicBlock *BB = &F->getEntryBlock();
    return Scatterer(BB, BB->begin(), V, &Scattered[V]);
  }
  if (Instruction *VOp = dyn_cast<Instruction>(V)) {
    // Put the scattered form of an instruction directly after the
    // instruction.
    BasicBlock *BB = VOp->getParent();
    return Scatterer(BB, std::next(BasicBlock::iterator(VOp)),
                     V, &Scattered[V]);
  }
  // In the fallback case, just put the scattered form before Point and
  // keep the result local to Point.
  return Scatterer(Point->getParent(), Point->getIterator(), V);
}

// Replace Op with the gathered form of the components in CV. Defer the
// deletion of Op and creation of the gathered form to the end of the pass,
// so that we can avoid creating the gathered form if all uses of Op are
// replaced with uses of CV.
void ScalarizerVisitor::gather(Instruction *Op, const ValueVector &CV) {
  // Since we're not deleting Op yet, stub out its operands, so that it
  // doesn't make anything live unnecessarily.
  for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I)
    Op->setOperand(I, UndefValue::get(Op->getOperand(I)->getType()));

  transferMetadata(Op, CV);

  // If we already have a scattered form of Op (created from ExtractElements
  // of Op itself), replace them with the new form.
  ValueVector &SV = Scattered[Op];
  if (!SV.empty()) {
    for (unsigned I = 0, E = SV.size(); I != E; ++I) {
      Value *V = SV[I];
      if (V == nullptr)
        continue;

      Instruction *Old = cast<Instruction>(V);
      CV[I]->takeName(Old);
      Old->replaceAllUsesWith(CV[I]);
      Old->eraseFromParent();
    }
  }
  SV = CV;
  Gathered.push_back(GatherList::value_type(Op, &SV));
}

// Return true if it is safe to transfer the given metadata tag from
// vector to scalar instructions.
bool ScalarizerVisitor::canTransferMetadata(unsigned Tag) {
  return (Tag == LLVMContext::MD_tbaa
          || Tag == LLVMContext::MD_fpmath
          || Tag == LLVMContext::MD_tbaa_struct
          || Tag == LLVMContext::MD_invariant_load
          || Tag == LLVMContext::MD_alias_scope
          || Tag == LLVMContext::MD_noalias
          || Tag == ParallelLoopAccessMDKind
          || Tag == LLVMContext::MD_access_group);
}

// Transfer metadata from Op to the instructions in CV if it is known
// to be safe to do so.
void ScalarizerVisitor::transferMetadata(Instruction *Op,
                                         const ValueVector &CV) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  Op->getAllMetadataOtherThanDebugLoc(MDs);
  for (unsigned I = 0, E = CV.size(); I != E; ++I) {
    if (Instruction *New = dyn_cast<Instruction>(CV[I])) {
      for (const auto &MD : MDs)
        if (canTransferMetadata(MD.first))
          New->setMetadata(MD.first, MD.second);
      if (Op->getDebugLoc() && !New->getDebugLoc())
        New->setDebugLoc(Op->getDebugLoc());
    }
  }
}

// Try to fill in Layout from Ty, returning true on success. Alignment is
// the alignment of the vector, or 0 if the ABI default should be used.
bool ScalarizerVisitor::getVectorLayout(Type *Ty, unsigned Alignment,
                                        VectorLayout &Layout,
                                        const DataLayout &DL) {
  // Make sure we're dealing with a vector.
  Layout.VecTy = dyn_cast<VectorType>(Ty);
  if (!Layout.VecTy)
    return false;

  // Check that we're dealing with full-byte elements.
  Layout.ElemTy = Layout.VecTy->getElementType();
  if (DL.getTypeSizeInBits(Layout.ElemTy) !=
      DL.getTypeStoreSizeInBits(Layout.ElemTy))
    return false;

  if (Alignment)
    Layout.VecAlign = Alignment;
  else
    Layout.VecAlign = DL.getABITypeAlignment(Layout.VecTy);
  Layout.ElemSize = DL.getTypeStoreSize(Layout.ElemTy);
  return true;
}

// Scalarize two-operand instruction I, using Split(Builder, X, Y, Name)
// to create an instruction like I with operands X and Y and name Name.
template<typename Splitter>
bool ScalarizerVisitor::splitBinary(Instruction &I, const Splitter &Split) {
  VectorType *VT = dyn_cast<VectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer Op0 = scatter(&I, I.getOperand(0));
  Scatterer Op1 = scatter(&I, I.getOperand(1));
  assert(Op0.size() == NumElems && "Mismatched binary operation");
  assert(Op1.size() == NumElems && "Mismatched binary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem)
    Res[Elem] = Split(Builder, Op0[Elem], Op1[Elem],
                      I.getName() + ".i" + Twine(Elem));
  gather(&I, Res);
  return true;
}
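
// For instance (an illustrative sketch), splitting
// "%r = mul <2 x i32> %x, %y" produces
//
//   %r.i0 = mul i32 %x.i0, %y.i0
//   %r.i1 = mul i32 %x.i1, %y.i1
//
// where %x.i0 etc. come from the operands' Scatterers.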

static bool isTriviallyScalarizable(Intrinsic::ID ID) {
  return isTriviallyVectorizable(ID);
}

// All of the current scalarizable intrinsics only have one mangled type.
static Function *getScalarIntrinsicDeclaration(Module *M,
                                               Intrinsic::ID ID,
                                               VectorType *Ty) {
  return Intrinsic::getDeclaration(M, ID, { Ty->getScalarType() });
}
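
// For example, when scalarizing a call to llvm.sqrt.v4f32, this declares
// llvm.sqrt.f32, since the intrinsic is mangled on its scalar type.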

/// If CI is a call to a vector-typed intrinsic function, split the call
/// into one scalar call per vector element, if possible for the intrinsic.
bool ScalarizerVisitor::splitCall(CallInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getType());
  if (!VT)
    return false;

  Function *F = CI.getCalledFunction();
  if (!F)
    return false;

  Intrinsic::ID ID = F->getIntrinsicID();
  if (ID == Intrinsic::not_intrinsic || !isTriviallyScalarizable(ID))
    return false;

  unsigned NumElems = VT->getNumElements();
  unsigned NumArgs = CI.getNumArgOperands();

  ValueVector ScalarOperands(NumArgs);
  SmallVector<Scatterer, 8> Scattered(NumArgs);

  // Assumes that any vector type has the same number of elements as the
  // return vector type, which is true for all current intrinsics.
  for (unsigned I = 0; I != NumArgs; ++I) {
    Value *OpI = CI.getOperand(I);
    if (OpI->getType()->isVectorTy()) {
      Scattered[I] = scatter(&CI, OpI);
      assert(Scattered[I].size() == NumElems && "mismatched call operands");
    } else {
      ScalarOperands[I] = OpI;
    }
  }

  ValueVector Res(NumElems);
  ValueVector ScalarCallOps(NumArgs);

  Function *NewIntrin = getScalarIntrinsicDeclaration(F->getParent(), ID, VT);
  IRBuilder<> Builder(&CI);

  // Perform actual scalarization, taking care to preserve any scalar
  // operands.
  for (unsigned Elem = 0; Elem < NumElems; ++Elem) {
    ScalarCallOps.clear();

    for (unsigned J = 0; J != NumArgs; ++J) {
      if (hasVectorInstrinsicScalarOpd(ID, J))
        ScalarCallOps.push_back(ScalarOperands[J]);
      else
        ScalarCallOps.push_back(Scattered[J][Elem]);
    }

    Res[Elem] = Builder.CreateCall(NewIntrin, ScalarCallOps,
                                   CI.getName() + ".i" + Twine(Elem));
  }

  gather(&CI, Res);
  return true;
}

bool ScalarizerVisitor::visitSelectInst(SelectInst &SI) {
  VectorType *VT = dyn_cast<VectorType>(SI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer Op1 = scatter(&SI, SI.getOperand(1));
  Scatterer Op2 = scatter(&SI, SI.getOperand(2));
  assert(Op1.size() == NumElems && "Mismatched select");
  assert(Op2.size() == NumElems && "Mismatched select");
  ValueVector Res;
  Res.resize(NumElems);

  if (SI.getOperand(0)->getType()->isVectorTy()) {
    Scatterer Op0 = scatter(&SI, SI.getOperand(0));
    assert(Op0.size() == NumElems && "Mismatched select");
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = Builder.CreateSelect(Op0[I], Op1[I], Op2[I],
                                    SI.getName() + ".i" + Twine(I));
  } else {
    Value *Op0 = SI.getOperand(0);
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = Builder.CreateSelect(Op0, Op1[I], Op2[I],
                                    SI.getName() + ".i" + Twine(I));
  }
  gather(&SI, Res);
  return true;
}

bool ScalarizerVisitor::visitICmpInst(ICmpInst &ICI) {
  return splitBinary(ICI, ICmpSplitter(ICI));
}

bool ScalarizerVisitor::visitFCmpInst(FCmpInst &FCI) {
  return splitBinary(FCI, FCmpSplitter(FCI));
}

bool ScalarizerVisitor::visitBinaryOperator(BinaryOperator &BO) {
  return splitBinary(BO, BinarySplitter(BO));
}

bool ScalarizerVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  VectorType *VT = dyn_cast<VectorType>(GEPI.getType());
  if (!VT)
    return false;

  IRBuilder<> Builder(&GEPI);
  unsigned NumElems = VT->getNumElements();
  unsigned NumIndices = GEPI.getNumIndices();

  // The base pointer might be scalar even if it's a vector GEP. In those
  // cases, splat the pointer into a vector value, and scatter that vector.
  Value *Op0 = GEPI.getOperand(0);
  if (!Op0->getType()->isVectorTy())
    Op0 = Builder.CreateVectorSplat(NumElems, Op0);
  Scatterer Base = scatter(&GEPI, Op0);

  SmallVector<Scatterer, 8> Ops;
  Ops.resize(NumIndices);
  for (unsigned I = 0; I < NumIndices; ++I) {
    Value *Op = GEPI.getOperand(I + 1);

    // The indices might be scalars even if it's a vector GEP. In those
    // cases, splat the scalar into a vector value, and scatter that vector.
    if (!Op->getType()->isVectorTy())
      Op = Builder.CreateVectorSplat(NumElems, Op);

    Ops[I] = scatter(&GEPI, Op);
  }

  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    SmallVector<Value *, 8> Indices;
    Indices.resize(NumIndices);
    for (unsigned J = 0; J < NumIndices; ++J)
      Indices[J] = Ops[J][I];
    Res[I] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[I], Indices,
                               GEPI.getName() + ".i" + Twine(I));
    if (GEPI.isInBounds())
      if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I]))
        NewGEPI->setIsInBounds();
  }
  gather(&GEPI, Res);
  return true;
}
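
// As an illustrative sketch, a vector GEP roughly of the form
// "%p = getelementptr i32, <2 x i32*> %base, <2 x i64> %idx" becomes two
// scalar GEPs, %p.i0 and %p.i1, each taking one lane of %base and %idx;
// a scalar base or index is first splatted so every lane sees the same
// value.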

bool ScalarizerVisitor::visitCastInst(CastInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getDestTy());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&CI);
  Scatterer Op0 = scatter(&CI, CI.getOperand(0));
  assert(Op0.size() == NumElems && "Mismatched cast");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateCast(CI.getOpcode(), Op0[I], VT->getElementType(),
                                CI.getName() + ".i" + Twine(I));
  gather(&CI, Res);
  return true;
}

bool ScalarizerVisitor::visitBitCastInst(BitCastInst &BCI) {
  VectorType *DstVT = dyn_cast<VectorType>(BCI.getDestTy());
  VectorType *SrcVT = dyn_cast<VectorType>(BCI.getSrcTy());
  if (!DstVT || !SrcVT)
    return false;

  unsigned DstNumElems = DstVT->getNumElements();
  unsigned SrcNumElems = SrcVT->getNumElements();
  IRBuilder<> Builder(&BCI);
  Scatterer Op0 = scatter(&BCI, BCI.getOperand(0));
  ValueVector Res;
  Res.resize(DstNumElems);

  if (DstNumElems == SrcNumElems) {
    for (unsigned I = 0; I < DstNumElems; ++I)
      Res[I] = Builder.CreateBitCast(Op0[I], DstVT->getElementType(),
                                     BCI.getName() + ".i" + Twine(I));
  } else if (DstNumElems > SrcNumElems) {
    // <M x t1> -> <N*M x t2>. Convert each t1 to <N x t2> and copy the
    // individual elements to the destination.
    unsigned FanOut = DstNumElems / SrcNumElems;
    Type *MidTy = VectorType::get(DstVT->getElementType(), FanOut);
    unsigned ResI = 0;
    for (unsigned Op0I = 0; Op0I < SrcNumElems; ++Op0I) {
      Value *V = Op0[Op0I];
      Instruction *VI;
      // Look through any existing bitcasts before converting to <N x t2>.
      // In the best case, the resulting conversion might be a no-op.
      while ((VI = dyn_cast<Instruction>(V)) &&
             VI->getOpcode() == Instruction::BitCast)
        V = VI->getOperand(0);
      V = Builder.CreateBitCast(V, MidTy, V->getName() + ".cast");
      Scatterer Mid = scatter(&BCI, V);
      for (unsigned MidI = 0; MidI < FanOut; ++MidI)
        Res[ResI++] = Mid[MidI];
    }
  } else {
    // <N*M x t1> -> <M x t2>. Convert each group of <N x t1> into a t2.
    unsigned FanIn = SrcNumElems / DstNumElems;
    Type *MidTy = VectorType::get(SrcVT->getElementType(), FanIn);
    unsigned Op0I = 0;
    for (unsigned ResI = 0; ResI < DstNumElems; ++ResI) {
      Value *V = UndefValue::get(MidTy);
      for (unsigned MidI = 0; MidI < FanIn; ++MidI)
        V = Builder.CreateInsertElement(V, Op0[Op0I++], Builder.getInt32(MidI),
                                        BCI.getName() + ".i" + Twine(ResI)
                                        + ".upto" + Twine(MidI));
      Res[ResI] = Builder.CreateBitCast(V, DstVT->getElementType(),
                                        BCI.getName() + ".i" + Twine(ResI));
    }
  }
  gather(&BCI, Res);
  return true;
}
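
// For example (illustrative), bitcasting <2 x i64> to <4 x i32> takes the
// fan-out path with FanOut = 2: each scalar i64 lane is bitcast to the mid
// type <2 x i32> and its two lanes are copied into the result. The reverse
// cast takes the fan-in path, packing pairs of i32 lanes into a <2 x i32>
// before bitcasting each group to an i64.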

bool ScalarizerVisitor::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  VectorType *VT = dyn_cast<VectorType>(SVI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  Scatterer Op0 = scatter(&SVI, SVI.getOperand(0));
  Scatterer Op1 = scatter(&SVI, SVI.getOperand(1));
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I) {
    int Selector = SVI.getMaskValue(I);
    if (Selector < 0)
      Res[I] = UndefValue::get(VT->getElementType());
    else if (unsigned(Selector) < Op0.size())
      Res[I] = Op0[Selector];
    else
      Res[I] = Op1[Selector - Op0.size()];
  }
  gather(&SVI, Res);
  return true;
}
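
// For a mask such as <i32 0, i32 3, i32 undef> over two <2 x i32> inputs,
// this picks Op0[0], then Op1[1] (selector 3 minus Op0.size() of 2), and
// leaves the undef lane as an UndefValue; no shuffle instruction survives.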

bool ScalarizerVisitor::visitPHINode(PHINode &PHI) {
  VectorType *VT = dyn_cast<VectorType>(PHI.getType());
  if (!VT)
    return false;

  unsigned NumElems = VT->getNumElements();
  IRBuilder<> Builder(&PHI);
  ValueVector Res;
  Res.resize(NumElems);

  unsigned NumOps = PHI.getNumOperands();
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreatePHI(VT->getElementType(), NumOps,
                               PHI.getName() + ".i" + Twine(I));

  for (unsigned I = 0; I < NumOps; ++I) {
    Scatterer Op = scatter(&PHI, PHI.getIncomingValue(I));
    BasicBlock *IncomingBlock = PHI.getIncomingBlock(I);
    for (unsigned J = 0; J < NumElems; ++J)
      cast<PHINode>(Res[J])->addIncoming(Op[J], IncomingBlock);
  }
  gather(&PHI, Res);
  return true;
}

bool ScalarizerVisitor::visitLoadInst(LoadInst &LI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!LI.isSimple())
    return false;

  VectorLayout Layout;
  if (!getVectorLayout(LI.getType(), LI.getAlignment(), Layout,
                       LI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(&LI);
  Scatterer Ptr = scatter(&LI, LI.getPointerOperand());
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateAlignedLoad(Ptr[I], Layout.getElemAlign(I),
                                       LI.getName() + ".i" + Twine(I));
  gather(&LI, Res);
  return true;
}

bool ScalarizerVisitor::visitStoreInst(StoreInst &SI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!SI.isSimple())
    return false;

  VectorLayout Layout;
  Value *FullValue = SI.getValueOperand();
  if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout,
                       SI.getModule()->getDataLayout()))
    return false;

  unsigned NumElems = Layout.VecTy->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer Ptr = scatter(&SI, SI.getPointerOperand());
  Scatterer Val = scatter(&SI, FullValue);

  ValueVector Stores;
  Stores.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    unsigned Align = Layout.getElemAlign(I);
    Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], Align);
  }
  transferMetadata(&SI, Stores);
  return true;
}

bool ScalarizerVisitor::visitCallInst(CallInst &CI) {
  return splitCall(CI);
}

// Delete the instructions that we scalarized. If a full vector result
// is still needed, recreate it using InsertElements.
bool ScalarizerVisitor::finish() {
  // The presence of data in Gathered or Scattered indicates changes
  // made to the Function.
  if (Gathered.empty() && Scattered.empty())
    return false;
  for (const auto &GMI : Gathered) {
    Instruction *Op = GMI.first;
    ValueVector &CV = *GMI.second;
    if (!Op->use_empty()) {
      // The value is still needed, so recreate it using a series of
      // InsertElements.
      Type *Ty = Op->getType();
      Value *Res = UndefValue::get(Ty);
      BasicBlock *BB = Op->getParent();
      unsigned Count = Ty->getVectorNumElements();
      IRBuilder<> Builder(Op);
      if (isa<PHINode>(Op))
        Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
      for (unsigned I = 0; I < Count; ++I)
        Res = Builder.CreateInsertElement(Res, CV[I], Builder.getInt32(I),
                                          Op->getName() + ".upto" + Twine(I));
      Res->takeName(Op);
      Op->replaceAllUsesWith(Res);
    }
    Op->eraseFromParent();
  }
  Gathered.clear();
  Scattered.clear();
  return true;
}

PreservedAnalyses ScalarizerPass::run(Function &F,
                                      FunctionAnalysisManager &AM) {
  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind);
  bool Changed = Impl.visit(F);
  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}