//===- Scalarizer.cpp - Scalarize vector operations -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass converts vector operations into scalar operations, in order
// to expose optimization opportunities on the individual scalar operations.
// It is mainly intended for targets that do not have vector units, but it
// may also be useful for revectorizing code to different vector widths.
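//
// For example (illustrative IR), a vector add such as
//
//   %sum = add <2 x i32> %a, %b
//
// becomes one scalar add per element, with operands obtained via
// extractelement and the vector result regathered with insertelement only
// if a full vector value is still needed:
//
//   %a.i0 = extractelement <2 x i32> %a, i32 0
//   %b.i0 = extractelement <2 x i32> %b, i32 0
//   %sum.i0 = add i32 %a.i0, %b.i0
//   (and likewise for element 1)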
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/Scalarizer.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "scalarizer"

static cl::opt<bool> ClScalarizeVariableInsertExtract(
    "scalarize-variable-insert-extract", cl::init(true), cl::Hidden,
    cl::desc("Allow the scalarizer pass to scalarize "
             "insertelement/extractelement with variable index"));

// This is disabled by default because having separate loads and stores
// makes it more likely that the -combiner-alias-analysis limits will be
// reached.
static cl::opt<bool> ClScalarizeLoadStore(
    "scalarize-load-store", cl::init(false), cl::Hidden,
    cl::desc("Allow the scalarizer pass to scalarize loads and stores"));

namespace {

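// Advance Itr past any PHI nodes and subsequent debug intrinsics, yielding
// the first point at which non-PHI instructions may be inserted.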
BasicBlock::iterator skipPastPhiNodesAndDbg(BasicBlock::iterator Itr) {
  BasicBlock *BB = Itr->getParent();
  if (isa<PHINode>(Itr))
    Itr = BB->getFirstInsertionPt();
  if (Itr != BB->end())
    Itr = skipDebugIntrinsics(Itr);
  return Itr;
}

// Used to store the scattered form of a vector.
using ValueVector = SmallVector<Value *, 8>;

// Used to map a vector Value to its scattered form. We use std::map
// because we want iterators to persist across insertion and because the
// values are relatively large.
using ScatterMap = std::map<Value *, ValueVector>;

// Lists Instructions that have been replaced with scalar implementations,
// along with a pointer to their scattered forms.
using GatherList = SmallVector<std::pair<Instruction *, ValueVector *>, 16>;

// Provides a very limited vector-like interface for lazily accessing one
// component of a scattered vector or vector pointer.
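// For example (illustrative), component 2 of a scattered <4 x i32> value is
// the scalar i32 holding element 2; it is materialized (or looked up in the
// cache) only when operator[](2) is first called.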
class Scatterer {
public:
  Scatterer() = default;

  // Scatter V into Size components. If new instructions are needed,
  // insert them before BBI in BB. If CachePtr is nonnull, use it to cache
  // the results.
  Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v, Type *PtrElemTy,
            ValueVector *cachePtr = nullptr);

  // Return component I, creating a new Value for it if necessary.
  Value *operator[](unsigned I);

  // Return the number of components.
  unsigned size() const { return Size; }

private:
  BasicBlock *BB;
  BasicBlock::iterator BBI;
  Value *V;
  Type *PtrElemTy;
  ValueVector *CachePtr;
  ValueVector Tmp;
  unsigned Size;
};

// FCmpSplitter(FCI)(Builder, X, Y, Name) uses Builder to create an FCmp
// called Name that compares X and Y in the same way as FCI.
struct FCmpSplitter {
  FCmpSplitter(FCmpInst &fci) : FCI(fci) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateFCmp(FCI.getPredicate(), Op0, Op1, Name);
  }

  FCmpInst &FCI;
};

// ICmpSplitter(ICI)(Builder, X, Y, Name) uses Builder to create an ICmp
// called Name that compares X and Y in the same way as ICI.
struct ICmpSplitter {
  ICmpSplitter(ICmpInst &ici) : ICI(ici) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateICmp(ICI.getPredicate(), Op0, Op1, Name);
  }

  ICmpInst &ICI;
};

// UnarySplitter(UO)(Builder, X, Name) uses Builder to create
// a unary operator like UO called Name with operand X.
struct UnarySplitter {
  UnarySplitter(UnaryOperator &uo) : UO(uo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op, const Twine &Name) const {
    return Builder.CreateUnOp(UO.getOpcode(), Op, Name);
  }

  UnaryOperator &UO;
};

// BinarySplitter(BO)(Builder, X, Y, Name) uses Builder to create
// a binary operator like BO called Name with operands X and Y.
struct BinarySplitter {
  BinarySplitter(BinaryOperator &bo) : BO(bo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateBinOp(BO.getOpcode(), Op0, Op1, Name);
  }

  BinaryOperator &BO;
};

// Information about a load or store that we're scalarizing.
struct VectorLayout {
  VectorLayout() = default;

  // Return the alignment of element I.
  Align getElemAlign(unsigned I) {
    return commonAlignment(VecAlign, I * ElemSize);
  }

  // The type of the vector.
  VectorType *VecTy = nullptr;

  // The type of each element.
  Type *ElemTy = nullptr;

  // The alignment of the vector.
  Align VecAlign;

  // The size of each element.
  uint64_t ElemSize = 0;
};

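// Return the value of ClOption if it was explicitly set on the command line;
// otherwise return DefaultOverride if it holds a value, falling back to the
// option's built-in default if not.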
template <typename T>
T getWithDefaultOverride(const cl::opt<T> &ClOption,
                         const llvm::Optional<T> &DefaultOverride) {
  return ClOption.getNumOccurrences() ? ClOption
                                      : DefaultOverride.value_or(ClOption);
}

class ScalarizerVisitor : public InstVisitor<ScalarizerVisitor, bool> {
public:
  ScalarizerVisitor(unsigned ParallelLoopAccessMDKind, DominatorTree *DT,
                    ScalarizerPassOptions Options)
      : ParallelLoopAccessMDKind(ParallelLoopAccessMDKind), DT(DT),
        ScalarizeVariableInsertExtract(
            getWithDefaultOverride(ClScalarizeVariableInsertExtract,
                                   Options.ScalarizeVariableInsertExtract)),
        ScalarizeLoadStore(getWithDefaultOverride(ClScalarizeLoadStore,
                                                  Options.ScalarizeLoadStore)) {
  }

  bool visit(Function &F);

  // InstVisitor methods. They return true if the instruction was scalarized,
  // false if nothing changed.
  bool visitInstruction(Instruction &I) { return false; }
  bool visitSelectInst(SelectInst &SI);
  bool visitICmpInst(ICmpInst &ICI);
  bool visitFCmpInst(FCmpInst &FCI);
  bool visitUnaryOperator(UnaryOperator &UO);
  bool visitBinaryOperator(BinaryOperator &BO);
  bool visitGetElementPtrInst(GetElementPtrInst &GEPI);
  bool visitCastInst(CastInst &CI);
  bool visitBitCastInst(BitCastInst &BCI);
  bool visitInsertElementInst(InsertElementInst &IEI);
  bool visitExtractElementInst(ExtractElementInst &EEI);
  bool visitShuffleVectorInst(ShuffleVectorInst &SVI);
  bool visitPHINode(PHINode &PHI);
  bool visitLoadInst(LoadInst &LI);
  bool visitStoreInst(StoreInst &SI);
  bool visitCallInst(CallInst &ICI);

private:
  Scatterer scatter(Instruction *Point, Value *V, Type *PtrElemTy = nullptr);
  void gather(Instruction *Op, const ValueVector &CV);
  void replaceUses(Instruction *Op, Value *CV);
  bool canTransferMetadata(unsigned Kind);
  void transferMetadataAndIRFlags(Instruction *Op, const ValueVector &CV);
  Optional<VectorLayout> getVectorLayout(Type *Ty, Align Alignment,
                                         const DataLayout &DL);
  bool finish();

  template<typename T> bool splitUnary(Instruction &, const T &);
  template<typename T> bool splitBinary(Instruction &, const T &);

  bool splitCall(CallInst &CI);

  ScatterMap Scattered;
  GatherList Gathered;
  bool Scalarized;

  SmallVector<WeakTrackingVH, 32> PotentiallyDeadInstrs;

  unsigned ParallelLoopAccessMDKind;

  DominatorTree *DT;

  const bool ScalarizeVariableInsertExtract;
  const bool ScalarizeLoadStore;
};

class ScalarizerLegacyPass : public FunctionPass {
public:
  static char ID;

  ScalarizerLegacyPass() : FunctionPass(ID) {
    initializeScalarizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
  }
};

} // end anonymous namespace

char ScalarizerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(ScalarizerLegacyPass, "scalarizer",
                      "Scalarize vector operations", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(ScalarizerLegacyPass, "scalarizer",
                    "Scalarize vector operations", false, false)

Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
                     Type *PtrElemTy, ValueVector *cachePtr)
    : BB(bb), BBI(bbi), V(v), PtrElemTy(PtrElemTy), CachePtr(cachePtr) {
  Type *Ty = V->getType();
  if (Ty->isPointerTy()) {
    assert(cast<PointerType>(Ty)->isOpaqueOrPointeeTypeMatches(PtrElemTy) &&
           "Pointer element type mismatch");
    Ty = PtrElemTy;
  }
  Size = cast<FixedVectorType>(Ty)->getNumElements();
  if (!CachePtr)
    Tmp.resize(Size, nullptr);
  else if (CachePtr->empty())
    CachePtr->resize(Size, nullptr);
  else
    assert(Size == CachePtr->size() && "Inconsistent vector sizes");
}

// Return component I, creating a new Value for it if necessary.
Value *Scatterer::operator[](unsigned I) {
  ValueVector &CV = (CachePtr ? *CachePtr : Tmp);
  // Try to reuse a previous value.
  if (CV[I])
    return CV[I];
  IRBuilder<> Builder(BB, BBI);
  if (PtrElemTy) {
    Type *VectorElemTy = cast<VectorType>(PtrElemTy)->getElementType();
    if (!CV[0]) {
      Type *NewPtrTy = PointerType::get(
          VectorElemTy, V->getType()->getPointerAddressSpace());
      CV[0] = Builder.CreateBitCast(V, NewPtrTy, V->getName() + ".i0");
    }
    if (I != 0)
      CV[I] = Builder.CreateConstGEP1_32(VectorElemTy, CV[0], I,
                                         V->getName() + ".i" + Twine(I));
  } else {
    // Search through a chain of InsertElementInsts looking for element I.
    // Record other elements in the cache. The new V is still suitable
    // for all uncached indices.
    while (true) {
      InsertElementInst *Insert = dyn_cast<InsertElementInst>(V);
      if (!Insert)
        break;
      ConstantInt *Idx = dyn_cast<ConstantInt>(Insert->getOperand(2));
      if (!Idx)
        break;
      unsigned J = Idx->getZExtValue();
      V = Insert->getOperand(0);
      if (I == J) {
        CV[J] = Insert->getOperand(1);
        return CV[J];
      } else if (!CV[J]) {
        // Only cache the first entry we find for each index we're not actively
        // searching for. This prevents us from going too far up the chain and
        // caching incorrect entries.
        CV[J] = Insert->getOperand(1);
      }
    }
    CV[I] = Builder.CreateExtractElement(V, Builder.getInt32(I),
                                         V->getName() + ".i" + Twine(I));
  }
  return CV[I];
}

bool ScalarizerLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind, DT, ScalarizerPassOptions());
  return Impl.visit(F);
}

FunctionPass *llvm::createScalarizerPass() {
  return new ScalarizerLegacyPass();
}

bool ScalarizerVisitor::visit(Function &F) {
  assert(Gathered.empty() && Scattered.empty());

  Scalarized = false;

  // To ensure we replace gathered components correctly we need to do an
  // ordered traversal of the basic blocks in the function.
  ReversePostOrderTraversal<BasicBlock *> RPOT(&F.getEntryBlock());
  for (BasicBlock *BB : RPOT) {
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
      Instruction *I = &*II;
      bool Done = InstVisitor::visit(I);
      ++II;
      if (Done && I->getType()->isVoidTy())
        I->eraseFromParent();
    }
  }
  return finish();
}

// Return a scattered form of V that can be accessed by Point. V must be a
// vector or a pointer to a vector.
Scatterer ScalarizerVisitor::scatter(Instruction *Point, Value *V,
                                     Type *PtrElemTy) {
  if (Argument *VArg = dyn_cast<Argument>(V)) {
    // Put the scattered form of arguments in the entry block,
    // so that it can be used everywhere.
    Function *F = VArg->getParent();
    BasicBlock *BB = &F->getEntryBlock();
    return Scatterer(BB, BB->begin(), V, PtrElemTy, &Scattered[V]);
  }
  if (Instruction *VOp = dyn_cast<Instruction>(V)) {
    // When scalarizing PHI nodes we might try to examine/rewrite InsertElement
    // nodes in predecessors. If those predecessors are unreachable from entry,
    // then the IR in those blocks could have unexpected properties resulting
    // in infinite loops in Scatterer::operator[]. By simply treating values
    // originating from instructions in unreachable blocks as undef we do not
    // need to analyse them further.
    if (!DT->isReachableFromEntry(VOp->getParent()))
      return Scatterer(Point->getParent(), Point->getIterator(),
                       PoisonValue::get(V->getType()), PtrElemTy);
    // Put the scattered form of an instruction directly after the
    // instruction, skipping over PHI nodes and debug intrinsics.
    BasicBlock *BB = VOp->getParent();
    return Scatterer(
        BB, skipPastPhiNodesAndDbg(std::next(BasicBlock::iterator(VOp))), V,
        PtrElemTy, &Scattered[V]);
  }
  // In the fallback case, just put the scattered form before Point and
  // keep the result local to Point.
  return Scatterer(Point->getParent(), Point->getIterator(), V, PtrElemTy);
}

// Replace Op with the gathered form of the components in CV. Defer the
// deletion of Op and creation of the gathered form to the end of the pass,
// so that we can avoid creating the gathered form if all uses of Op are
// replaced with uses of CV.
void ScalarizerVisitor::gather(Instruction *Op, const ValueVector &CV) {
  transferMetadataAndIRFlags(Op, CV);

  // If we already have a scattered form of Op (created from ExtractElements
  // of Op itself), replace them with the new form.
  ValueVector &SV = Scattered[Op];
  if (!SV.empty()) {
    for (unsigned I = 0, E = SV.size(); I != E; ++I) {
      Value *V = SV[I];
      if (V == nullptr || SV[I] == CV[I])
        continue;

      Instruction *Old = cast<Instruction>(V);
      if (isa<Instruction>(CV[I]))
        CV[I]->takeName(Old);
      Old->replaceAllUsesWith(CV[I]);
      PotentiallyDeadInstrs.emplace_back(Old);
    }
  }
  SV = CV;
  Gathered.push_back(GatherList::value_type(Op, &SV));
}

// Replace all uses of Op with CV and record Op as a potentially dead
// instruction.
void ScalarizerVisitor::replaceUses(Instruction *Op, Value *CV) {
  if (CV != Op) {
    Op->replaceAllUsesWith(CV);
    PotentiallyDeadInstrs.emplace_back(Op);
    Scalarized = true;
  }
}

// Return true if it is safe to transfer the given metadata tag from
// vector to scalar instructions.
bool ScalarizerVisitor::canTransferMetadata(unsigned Tag) {
  return (Tag == LLVMContext::MD_tbaa
          || Tag == LLVMContext::MD_fpmath
          || Tag == LLVMContext::MD_tbaa_struct
          || Tag == LLVMContext::MD_invariant_load
          || Tag == LLVMContext::MD_alias_scope
          || Tag == LLVMContext::MD_noalias
          || Tag == ParallelLoopAccessMDKind
          || Tag == LLVMContext::MD_access_group);
}

// Transfer metadata from Op to the instructions in CV if it is known
// to be safe to do so.
void ScalarizerVisitor::transferMetadataAndIRFlags(Instruction *Op,
                                                   const ValueVector &CV) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  Op->getAllMetadataOtherThanDebugLoc(MDs);
  for (unsigned I = 0, E = CV.size(); I != E; ++I) {
    if (Instruction *New = dyn_cast<Instruction>(CV[I])) {
      for (const auto &MD : MDs)
        if (canTransferMetadata(MD.first))
          New->setMetadata(MD.first, MD.second);
      New->copyIRFlags(Op);
      if (Op->getDebugLoc() && !New->getDebugLoc())
        New->setDebugLoc(Op->getDebugLoc());
    }
  }
}

// Try to fill in a VectorLayout from Ty, returning the layout on success
// and None on failure. Alignment is the alignment of the vector.
Optional<VectorLayout>
ScalarizerVisitor::getVectorLayout(Type *Ty, Align Alignment,
                                   const DataLayout &DL) {
  VectorLayout Layout;
  // Make sure we're dealing with a vector.
  Layout.VecTy = dyn_cast<VectorType>(Ty);
  if (!Layout.VecTy)
    return None;
  // Check that we're dealing with full-byte elements.
  Layout.ElemTy = Layout.VecTy->getElementType();
  if (!DL.typeSizeEqualsStoreSize(Layout.ElemTy))
    return None;
  Layout.VecAlign = Alignment;
  Layout.ElemSize = DL.getTypeStoreSize(Layout.ElemTy);
  return Layout;
}

// Scalarize one-operand instruction I, using Split(Builder, X, Name)
// to create an instruction like I with operand X and name Name.
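//
// For example (illustrative), "%r = fneg <2 x float> %x" is split into two
// scalar fneg instructions, %r.i0 and %r.i1, one per extracted element.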
template<typename Splitter>
bool ScalarizerVisitor::splitUnary(Instruction &I, const Splitter &Split) {
  VectorType *VT = dyn_cast<VectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer Op = scatter(&I, I.getOperand(0));
  assert(Op.size() == NumElems && "Mismatched unary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem)
    Res[Elem] = Split(Builder, Op[Elem], I.getName() + ".i" + Twine(Elem));
  gather(&I, Res);
  return true;
}

// Scalarize two-operand instruction I, using Split(Builder, X, Y, Name)
// to create an instruction like I with operands X and Y and name Name.
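//
// For example (illustrative), "%r = and <2 x i32> %a, %b" is split into two
// scalar and instructions, %r.i0 and %r.i1, one per pair of extracted
// elements.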
template<typename Splitter>
bool ScalarizerVisitor::splitBinary(Instruction &I, const Splitter &Split) {
  VectorType *VT = dyn_cast<VectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer VOp0 = scatter(&I, I.getOperand(0));
  Scatterer VOp1 = scatter(&I, I.getOperand(1));
  assert(VOp0.size() == NumElems && "Mismatched binary operation");
  assert(VOp1.size() == NumElems && "Mismatched binary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem) {
    Value *Op0 = VOp0[Elem];
    Value *Op1 = VOp1[Elem];
    Res[Elem] = Split(Builder, Op0, Op1, I.getName() + ".i" + Twine(Elem));
  }
  gather(&I, Res);
  return true;
}

static bool isTriviallyScalariable(Intrinsic::ID ID) {
  return isTriviallyVectorizable(ID);
}

// All of the current scalarizable intrinsics only have one mangled type.
static Function *getScalarIntrinsicDeclaration(Module *M,
                                               Intrinsic::ID ID,
                                               ArrayRef<Type*> Tys) {
  return Intrinsic::getDeclaration(M, ID, Tys);
}

/// If CI is a call to a vector-typed intrinsic function, split it into one
/// scalar call per element where the intrinsic allows it.
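///
/// For example (illustrative), a call to llvm.sqrt.v4f32 is replaced by four
/// calls to llvm.sqrt.f32, one per element, and the results are regathered
/// only if the full vector value is still needed.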
bool ScalarizerVisitor::splitCall(CallInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getType());
  if (!VT)
    return false;

  Function *F = CI.getCalledFunction();
  if (!F)
    return false;

  Intrinsic::ID ID = F->getIntrinsicID();
  if (ID == Intrinsic::not_intrinsic || !isTriviallyScalariable(ID))
    return false;

  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  unsigned NumArgs = CI.arg_size();

  ValueVector ScalarOperands(NumArgs);
  SmallVector<Scatterer, 8> Scattered(NumArgs);

  SmallVector<llvm::Type *, 3> Tys;
  Tys.push_back(VT->getScalarType());

  // Assumes that any vector type has the same number of elements as the return
  // vector type, which is true for all current intrinsics.
  for (unsigned I = 0; I != NumArgs; ++I) {
    Value *OpI = CI.getOperand(I);
    if (OpI->getType()->isVectorTy()) {
      Scattered[I] = scatter(&CI, OpI);
      assert(Scattered[I].size() == NumElems && "mismatched call operands");
      if (isVectorIntrinsicWithOverloadTypeAtArg(ID, I))
        Tys.push_back(OpI->getType()->getScalarType());
    } else {
      ScalarOperands[I] = OpI;
      if (isVectorIntrinsicWithOverloadTypeAtArg(ID, I))
        Tys.push_back(OpI->getType());
    }
  }

  ValueVector Res(NumElems);
  ValueVector ScalarCallOps(NumArgs);

  Function *NewIntrin = getScalarIntrinsicDeclaration(F->getParent(), ID, Tys);
  IRBuilder<> Builder(&CI);

  // Perform actual scalarization, taking care to preserve any scalar operands.
  for (unsigned Elem = 0; Elem < NumElems; ++Elem) {
    ScalarCallOps.clear();

    for (unsigned J = 0; J != NumArgs; ++J) {
      if (isVectorIntrinsicWithScalarOpAtArg(ID, J))
        ScalarCallOps.push_back(ScalarOperands[J]);
      else
        ScalarCallOps.push_back(Scattered[J][Elem]);
    }

    Res[Elem] = Builder.CreateCall(NewIntrin, ScalarCallOps,
                                   CI.getName() + ".i" + Twine(Elem));
  }

  gather(&CI, Res);
  return true;
}

bool ScalarizerVisitor::visitSelectInst(SelectInst &SI) {
  VectorType *VT = dyn_cast<VectorType>(SI.getType());
  if (!VT)
    return false;

  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer VOp1 = scatter(&SI, SI.getOperand(1));
  Scatterer VOp2 = scatter(&SI, SI.getOperand(2));
  assert(VOp1.size() == NumElems && "Mismatched select");
  assert(VOp2.size() == NumElems && "Mismatched select");
  ValueVector Res;
  Res.resize(NumElems);

  if (SI.getOperand(0)->getType()->isVectorTy()) {
    Scatterer VOp0 = scatter(&SI, SI.getOperand(0));
    assert(VOp0.size() == NumElems && "Mismatched select");
    for (unsigned I = 0; I < NumElems; ++I) {
      Value *Op0 = VOp0[I];
      Value *Op1 = VOp1[I];
      Value *Op2 = VOp2[I];
      Res[I] = Builder.CreateSelect(Op0, Op1, Op2,
                                    SI.getName() + ".i" + Twine(I));
    }
  } else {
    Value *Op0 = SI.getOperand(0);
    for (unsigned I = 0; I < NumElems; ++I) {
      Value *Op1 = VOp1[I];
      Value *Op2 = VOp2[I];
      Res[I] = Builder.CreateSelect(Op0, Op1, Op2,
                                    SI.getName() + ".i" + Twine(I));
    }
  }
  gather(&SI, Res);
  return true;
}

bool ScalarizerVisitor::visitICmpInst(ICmpInst &ICI) {
  return splitBinary(ICI, ICmpSplitter(ICI));
}

bool ScalarizerVisitor::visitFCmpInst(FCmpInst &FCI) {
  return splitBinary(FCI, FCmpSplitter(FCI));
}

bool ScalarizerVisitor::visitUnaryOperator(UnaryOperator &UO) {
  return splitUnary(UO, UnarySplitter(UO));
}

bool ScalarizerVisitor::visitBinaryOperator(BinaryOperator &BO) {
  return splitBinary(BO, BinarySplitter(BO));
}

bool ScalarizerVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  VectorType *VT = dyn_cast<VectorType>(GEPI.getType());
  if (!VT)
    return false;

  IRBuilder<> Builder(&GEPI);
  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  unsigned NumIndices = GEPI.getNumIndices();

  // The base pointer might be scalar even if it's a vector GEP. In those cases,
  // splat the pointer into a vector value, and scatter that vector.
  Value *Op0 = GEPI.getOperand(0);
  if (!Op0->getType()->isVectorTy())
    Op0 = Builder.CreateVectorSplat(NumElems, Op0);
  Scatterer Base = scatter(&GEPI, Op0);

  SmallVector<Scatterer, 8> Ops;
  Ops.resize(NumIndices);
  for (unsigned I = 0; I < NumIndices; ++I) {
    Value *Op = GEPI.getOperand(I + 1);

    // The indices might be scalars even if it's a vector GEP. In those cases,
    // splat the scalar into a vector value, and scatter that vector.
    if (!Op->getType()->isVectorTy())
      Op = Builder.CreateVectorSplat(NumElems, Op);

    Ops[I] = scatter(&GEPI, Op);
  }

  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    SmallVector<Value *, 8> Indices;
    Indices.resize(NumIndices);
    for (unsigned J = 0; J < NumIndices; ++J)
      Indices[J] = Ops[J][I];
    Res[I] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[I], Indices,
                               GEPI.getName() + ".i" + Twine(I));
    if (GEPI.isInBounds())
      if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I]))
        NewGEPI->setIsInBounds();
  }
  gather(&GEPI, Res);
  return true;
}

bool ScalarizerVisitor::visitCastInst(CastInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getDestTy());
  if (!VT)
    return false;

  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  IRBuilder<> Builder(&CI);
  Scatterer Op0 = scatter(&CI, CI.getOperand(0));
  assert(Op0.size() == NumElems && "Mismatched cast");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateCast(CI.getOpcode(), Op0[I], VT->getElementType(),
                                CI.getName() + ".i" + Twine(I));
  gather(&CI, Res);
  return true;
}

bool ScalarizerVisitor::visitBitCastInst(BitCastInst &BCI) {
  VectorType *DstVT = dyn_cast<VectorType>(BCI.getDestTy());
  VectorType *SrcVT = dyn_cast<VectorType>(BCI.getSrcTy());
  if (!DstVT || !SrcVT)
    return false;

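  // The source and destination vectors have the same total bit width, so
  // their element counts are related by an integer factor. For example
  // (illustrative), <2 x i64> -> <4 x i32> fans each i64 out to two i32s,
  // while <4 x i32> -> <2 x i64> folds each pair of i32s into one i64.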
  unsigned DstNumElems = cast<FixedVectorType>(DstVT)->getNumElements();
  unsigned SrcNumElems = cast<FixedVectorType>(SrcVT)->getNumElements();
  IRBuilder<> Builder(&BCI);
  Scatterer Op0 = scatter(&BCI, BCI.getOperand(0));
  ValueVector Res;
  Res.resize(DstNumElems);

  if (DstNumElems == SrcNumElems) {
    for (unsigned I = 0; I < DstNumElems; ++I)
      Res[I] = Builder.CreateBitCast(Op0[I], DstVT->getElementType(),
                                     BCI.getName() + ".i" + Twine(I));
  } else if (DstNumElems > SrcNumElems) {
    // <M x t1> -> <N*M x t2>. Convert each t1 to <N x t2> and copy the
    // individual elements to the destination.
    unsigned FanOut = DstNumElems / SrcNumElems;
    auto *MidTy = FixedVectorType::get(DstVT->getElementType(), FanOut);
    unsigned ResI = 0;
    for (unsigned Op0I = 0; Op0I < SrcNumElems; ++Op0I) {
      Value *V = Op0[Op0I];
      Instruction *VI;
      // Look through any existing bitcasts before converting to <N x t2>.
      // In the best case, the resulting conversion might be a no-op.
      while ((VI = dyn_cast<Instruction>(V)) &&
             VI->getOpcode() == Instruction::BitCast)
        V = VI->getOperand(0);
      V = Builder.CreateBitCast(V, MidTy, V->getName() + ".cast");
      Scatterer Mid = scatter(&BCI, V);
      for (unsigned MidI = 0; MidI < FanOut; ++MidI)
        Res[ResI++] = Mid[MidI];
    }
  } else {
    // <N*M x t1> -> <M x t2>. Convert each group of <N x t1> into a t2.
    unsigned FanIn = SrcNumElems / DstNumElems;
    auto *MidTy = FixedVectorType::get(SrcVT->getElementType(), FanIn);
    unsigned Op0I = 0;
    for (unsigned ResI = 0; ResI < DstNumElems; ++ResI) {
      Value *V = PoisonValue::get(MidTy);
      for (unsigned MidI = 0; MidI < FanIn; ++MidI)
        V = Builder.CreateInsertElement(V, Op0[Op0I++], Builder.getInt32(MidI),
                                        BCI.getName() + ".i" + Twine(ResI)
                                        + ".upto" + Twine(MidI));
      Res[ResI] = Builder.CreateBitCast(V, DstVT->getElementType(),
                                        BCI.getName() + ".i" + Twine(ResI));
    }
  }
  gather(&BCI, Res);
  return true;
}

bool ScalarizerVisitor::visitInsertElementInst(InsertElementInst &IEI) {
  VectorType *VT = dyn_cast<VectorType>(IEI.getType());
  if (!VT)
    return false;

  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  IRBuilder<> Builder(&IEI);
  Scatterer Op0 = scatter(&IEI, IEI.getOperand(0));
  Value *NewElt = IEI.getOperand(1);
  Value *InsIdx = IEI.getOperand(2);

  ValueVector Res;
  Res.resize(NumElems);

  if (auto *CI = dyn_cast<ConstantInt>(InsIdx)) {
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = CI->getValue().getZExtValue() == I ? NewElt : Op0[I];
  } else {
    if (!ScalarizeVariableInsertExtract)
      return false;

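    // Lower a variable-index insert to a compare-and-select per element:
    // element I of the result is NewElt when InsIdx == I and the original
    // element otherwise.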
    for (unsigned I = 0; I < NumElems; ++I) {
      Value *ShouldReplace =
          Builder.CreateICmpEQ(InsIdx, ConstantInt::get(InsIdx->getType(), I),
                               InsIdx->getName() + ".is." + Twine(I));
      Value *OldElt = Op0[I];
      Res[I] = Builder.CreateSelect(ShouldReplace, NewElt, OldElt,
                                    IEI.getName() + ".i" + Twine(I));
    }
  }

  gather(&IEI, Res);
  return true;
}

bool ScalarizerVisitor::visitExtractElementInst(ExtractElementInst &EEI) {
  VectorType *VT = dyn_cast<VectorType>(EEI.getOperand(0)->getType());
  if (!VT)
    return false;

  unsigned NumSrcElems = cast<FixedVectorType>(VT)->getNumElements();
  IRBuilder<> Builder(&EEI);
  Scatterer Op0 = scatter(&EEI, EEI.getOperand(0));
  Value *ExtIdx = EEI.getOperand(1);

  if (auto *CI = dyn_cast<ConstantInt>(ExtIdx)) {
    Value *Res = Op0[CI->getValue().getZExtValue()];
    replaceUses(&EEI, Res);
    return true;
  }

  if (!ScalarizeVariableInsertExtract)
    return false;

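  // Lower a variable-index extract to a chain of compare-and-selects,
  // folding each candidate element into Res as soon as its index matches
  // ExtIdx.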
  Value *Res = UndefValue::get(VT->getElementType());
  for (unsigned I = 0; I < NumSrcElems; ++I) {
    Value *ShouldExtract =
        Builder.CreateICmpEQ(ExtIdx, ConstantInt::get(ExtIdx->getType(), I),
                             ExtIdx->getName() + ".is." + Twine(I));
    Value *Elt = Op0[I];
    Res = Builder.CreateSelect(ShouldExtract, Elt, Res,
                               EEI.getName() + ".upto" + Twine(I));
  }
  replaceUses(&EEI, Res);
  return true;
}

bool ScalarizerVisitor::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  VectorType *VT = dyn_cast<VectorType>(SVI.getType());
  if (!VT)
    return false;

  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  Scatterer Op0 = scatter(&SVI, SVI.getOperand(0));
  Scatterer Op1 = scatter(&SVI, SVI.getOperand(1));
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I) {
    int Selector = SVI.getMaskValue(I);
    if (Selector < 0)
      Res[I] = UndefValue::get(VT->getElementType());
    else if (unsigned(Selector) < Op0.size())
      Res[I] = Op0[Selector];
    else
      Res[I] = Op1[Selector - Op0.size()];
  }
  gather(&SVI, Res);
  return true;
}

bool ScalarizerVisitor::visitPHINode(PHINode &PHI) {
  VectorType *VT = dyn_cast<VectorType>(PHI.getType());
  if (!VT)
    return false;

  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  IRBuilder<> Builder(&PHI);
  ValueVector Res;
  Res.resize(NumElems);

  unsigned NumOps = PHI.getNumOperands();
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreatePHI(VT->getElementType(), NumOps,
                               PHI.getName() + ".i" + Twine(I));

  for (unsigned I = 0; I < NumOps; ++I) {
    Scatterer Op = scatter(&PHI, PHI.getIncomingValue(I));
    BasicBlock *IncomingBlock = PHI.getIncomingBlock(I);
    for (unsigned J = 0; J < NumElems; ++J)
      cast<PHINode>(Res[J])->addIncoming(Op[J], IncomingBlock);
  }
  gather(&PHI, Res);
  return true;
}

bool ScalarizerVisitor::visitLoadInst(LoadInst &LI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!LI.isSimple())
    return false;

  Optional<VectorLayout> Layout = getVectorLayout(
      LI.getType(), LI.getAlign(), LI.getModule()->getDataLayout());
  if (!Layout)
    return false;

  unsigned NumElems = cast<FixedVectorType>(Layout->VecTy)->getNumElements();
  IRBuilder<> Builder(&LI);
  Scatterer Ptr = scatter(&LI, LI.getPointerOperand(), LI.getType());
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateAlignedLoad(Layout->VecTy->getElementType(), Ptr[I],
                                       Align(Layout->getElemAlign(I)),
                                       LI.getName() + ".i" + Twine(I));
  gather(&LI, Res);
  return true;
}

bool ScalarizerVisitor::visitStoreInst(StoreInst &SI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!SI.isSimple())
    return false;

  Value *FullValue = SI.getValueOperand();
  Optional<VectorLayout> Layout = getVectorLayout(
      FullValue->getType(), SI.getAlign(), SI.getModule()->getDataLayout());
  if (!Layout)
    return false;

  unsigned NumElems = cast<FixedVectorType>(Layout->VecTy)->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer VPtr = scatter(&SI, SI.getPointerOperand(), FullValue->getType());
  Scatterer VVal = scatter(&SI, FullValue);

  ValueVector Stores;
  Stores.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    Value *Val = VVal[I];
    Value *Ptr = VPtr[I];
    Stores[I] = Builder.CreateAlignedStore(Val, Ptr, Layout->getElemAlign(I));
  }
  transferMetadataAndIRFlags(&SI, Stores);
  return true;
}

bool ScalarizerVisitor::visitCallInst(CallInst &CI) {
  return splitCall(CI);
}

// Delete the instructions that we scalarized. If a full vector result
// is still needed, recreate it using InsertElements.
bool ScalarizerVisitor::finish() {
  // The presence of data in Gathered or Scattered indicates changes
  // made to the Function.
  if (Gathered.empty() && Scattered.empty() && !Scalarized)
    return false;
  for (const auto &GMI : Gathered) {
    Instruction *Op = GMI.first;
    ValueVector &CV = *GMI.second;
    if (!Op->use_empty()) {
      // The value is still needed, so recreate it using a series of
      // InsertElements.
      Value *Res = PoisonValue::get(Op->getType());
      if (auto *Ty = dyn_cast<VectorType>(Op->getType())) {
        BasicBlock *BB = Op->getParent();
        unsigned Count = cast<FixedVectorType>(Ty)->getNumElements();
        IRBuilder<> Builder(Op);
        if (isa<PHINode>(Op))
          Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
        for (unsigned I = 0; I < Count; ++I)
          Res = Builder.CreateInsertElement(Res, CV[I], Builder.getInt32(I),
                                            Op->getName() + ".upto" + Twine(I));
        Res->takeName(Op);
      } else {
        assert(CV.size() == 1 && Op->getType() == CV[0]->getType());
        Res = CV[0];
        if (Op == Res)
          continue;
      }
      Op->replaceAllUsesWith(Res);
    }
    PotentiallyDeadInstrs.emplace_back(Op);
  }
  Gathered.clear();
  Scattered.clear();
  Scalarized = false;

  RecursivelyDeleteTriviallyDeadInstructionsPermissive(PotentiallyDeadInstrs);

  return true;
}

PreservedAnalyses ScalarizerPass::run(Function &F, FunctionAnalysisManager &AM) {
  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind, DT, Options);
  bool Changed = Impl.visit(F);
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return Changed ? PA : PreservedAnalyses::all();
}