//===------- VectorCombine.cpp - Optimize partial vector operations -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass optimizes scalar/vector interactions using target cost models. The
// transforms implemented here may not fit in traditional loop-based or SLP
// vectorization passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/VectorCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "vector-combine"
STATISTIC(NumVecCmp, "Number of vector compares formed");
STATISTIC(NumVecBO, "Number of vector binops formed");
STATISTIC(NumShufOfBitcast, "Number of shuffles moved after bitcast");
STATISTIC(NumScalarBO, "Number of scalar binops formed");
STATISTIC(NumScalarCmp, "Number of scalar compares formed");

static cl::opt<bool> DisableVectorCombine(
    "disable-vector-combine", cl::init(false), cl::Hidden,
    cl::desc("Disable all vector combine transforms"));

static cl::opt<bool> DisableBinopExtractShuffle(
    "disable-binop-extract-shuffle", cl::init(false), cl::Hidden,
    cl::desc("Disable binop extract to shuffle transforms"));

class VectorCombine {
public:
  VectorCombine(Function &F, const TargetTransformInfo &TTI,
                const DominatorTree &DT)
      : F(F), TTI(TTI), DT(DT) {}

  bool run();

private:
  Function &F;
  const TargetTransformInfo &TTI;
  const DominatorTree &DT;

  bool isExtractExtractCheap(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                             unsigned Opcode,
                             ExtractElementInst *&ConvertToShuffle,
                             unsigned PreferredExtractIndex);
  bool foldExtractExtract(Instruction &I);
  bool foldBitcastShuf(Instruction &I);
  bool scalarizeBinopOrCmp(Instruction &I);
};

/// Compare the relative costs of 2 extracts followed by scalar operation vs.
/// vector operation(s) followed by extract. Return true if the existing
/// instructions are cheaper than a vector alternative. Otherwise, return false
/// and if one of the extracts should be transformed to a shufflevector, set
/// \p ConvertToShuffle to that extract instruction.
bool VectorCombine::isExtractExtractCheap(ExtractElementInst *Ext0,
                                          ExtractElementInst *Ext1,
                                          unsigned Opcode,
                                          ExtractElementInst *&ConvertToShuffle,
                                          unsigned PreferredExtractIndex) {
  assert(isa<ConstantInt>(Ext0->getOperand(1)) &&
         isa<ConstantInt>(Ext1->getOperand(1)) &&
         "Expected constant extract indexes");
  Type *ScalarTy = Ext0->getType();
  auto *VecTy = cast<VectorType>(Ext0->getOperand(0)->getType());
  int ScalarOpCost, VectorOpCost;

  // Get cost estimates for scalar and vector versions of the operation.
  bool IsBinOp = Instruction::isBinaryOp(Opcode);
  if (IsBinOp) {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  } else {
    assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
           "Expected a compare");
    ScalarOpCost = TTI.getCmpSelInstrCost(Opcode, ScalarTy,
                                          CmpInst::makeCmpResultType(ScalarTy));
    VectorOpCost = TTI.getCmpSelInstrCost(Opcode, VecTy,
                                          CmpInst::makeCmpResultType(VecTy));
  }

  // Get cost estimates for the extract elements. These costs will factor into
  // both sequences.
  unsigned Ext0Index = cast<ConstantInt>(Ext0->getOperand(1))->getZExtValue();
  unsigned Ext1Index = cast<ConstantInt>(Ext1->getOperand(1))->getZExtValue();

  int Extract0Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext0Index);
  int Extract1Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext1Index);

  // A more expensive extract will always be replaced by a splat shuffle.
  // For example, if Ext0 is the more expensive extract:
  // opcode (extelt V0, C0), (extelt V1, C1) -->
  // extelt (opcode (splat V0, C0), V1), C1
  // TODO: Evaluate whether that always results in lowest cost. Alternatively,
  //       check the cost of creating a broadcast shuffle and shuffling both
  //       operands to element 0.
  int CheapExtractCost = std::min(Extract0Cost, Extract1Cost);

  // Extra uses of the extracts mean that we include those costs in the
  // vector total because those instructions will not be eliminated.
  int OldCost, NewCost;
  if (Ext0->getOperand(0) == Ext1->getOperand(0) && Ext0Index == Ext1Index) {
    // Handle a special case. If the 2 extracts are identical, adjust the
    // formulas to account for that. The extra use charge allows for either the
    // CSE'd pattern or an unoptimized form with identical values:
    // opcode (extelt V, C), (extelt V, C) --> extelt (opcode V, V), C
    bool HasUseTax = Ext0 == Ext1 ? !Ext0->hasNUses(2)
                                  : !Ext0->hasOneUse() || !Ext1->hasOneUse();
    OldCost = CheapExtractCost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost + HasUseTax * CheapExtractCost;
  } else {
    // Handle the general case. Each extract is actually a different value:
    // opcode (extelt V0, C0), (extelt V1, C1) --> extelt (opcode V0, V1), C
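    // For illustration only (assuming hypothetical unit costs where each
    // extract and the op cost 1): with single-use extracts, OldCost is
    // 1 + 1 + 1 = 3 and NewCost is 1 + 1 = 2, so the vector form is preferred.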
    OldCost = Extract0Cost + Extract1Cost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost +
              !Ext0->hasOneUse() * Extract0Cost +
              !Ext1->hasOneUse() * Extract1Cost;
  }

  if (Ext0Index == Ext1Index) {
    // If the extract indexes are identical, no shuffle is needed.
    ConvertToShuffle = nullptr;
  } else {
    if (IsBinOp && DisableBinopExtractShuffle)
      return true;

    // If we are extracting from 2 different indexes, then one operand must be
    // shuffled before performing the vector operation. The shuffle mask is
    // undefined except for 1 lane that is being translated to the remaining
    // extraction lane. Therefore, it is a splat shuffle. Ex:
    // ShufMask = { undef, undef, 0, undef }
    // TODO: The cost model has an option for a "broadcast" shuffle
    //       (splat-from-element-0), but no option for a more general splat.
    NewCost +=
        TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy);

    // The more expensive extract will be replaced by a shuffle. If the costs
    // are equal and there is a preferred extract index, shuffle the opposite
    // operand. Otherwise, replace the extract with the higher index.
    if (Extract0Cost > Extract1Cost)
      ConvertToShuffle = Ext0;
    else if (Extract1Cost > Extract0Cost)
      ConvertToShuffle = Ext1;
    else if (PreferredExtractIndex == Ext0Index)
      ConvertToShuffle = Ext1;
    else if (PreferredExtractIndex == Ext1Index)
      ConvertToShuffle = Ext0;
    else
      ConvertToShuffle = Ext0Index > Ext1Index ? Ext0 : Ext1;
  }

  // Aggressively form a vector op if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  return OldCost < NewCost;
}

/// Given an extract element instruction with constant index operand, shuffle
/// the source vector (shift the scalar element) to a NewIndex for extraction.
/// Return null if the input can be constant folded, so that we are not creating
/// unnecessary instructions.
static ExtractElementInst *translateExtract(ExtractElementInst *ExtElt,
                                            unsigned NewIndex) {
  // If the extract can be constant-folded, this code is unsimplified. Defer
  // to other passes to handle that.
  Value *X = ExtElt->getVectorOperand();
  Value *C = ExtElt->getIndexOperand();
  if (isa<Constant>(X))
    return nullptr;

  // The shuffle mask is undefined except for 1 lane that is being translated
  // to the cheap extraction lane. Example:
  // ShufMask = { 2, undef, undef, undef }
  auto *VecTy = cast<FixedVectorType>(X->getType());
  SmallVector<int, 32> Mask(VecTy->getNumElements(), -1);
  assert(isa<ConstantInt>(C) && "Expected a constant index operand");
  Mask[NewIndex] = cast<ConstantInt>(C)->getZExtValue();

  // extelt X, C --> extelt (shuffle X), NewIndex
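  // For example (hypothetical <4 x i32> operands and names), translating an
  // extract of lane 2 to lane 0 would produce:
  //   %shift = shufflevector <4 x i32> %x, <4 x i32> undef,
  //                          <4 x i32> <i32 2, i32 undef, i32 undef, i32 undef>
  //   %ext = extractelement <4 x i32> %shift, i32 0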
  IRBuilder<> Builder(ExtElt);
  Value *Shuf =
      Builder.CreateShuffleVector(X, UndefValue::get(VecTy), Mask, "shift");
  return cast<ExtractElementInst>(Builder.CreateExtractElement(Shuf, NewIndex));
}

/// Try to reduce extract element costs by converting scalar compares to vector
/// compares followed by extract.
/// cmp (ext0 V0, C), (ext1 V1, C)
static void foldExtExtCmp(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                          Instruction &I) {
  assert(isa<CmpInst>(&I) && "Expected a compare");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // cmp Pred (extelt V0, C), (extelt V1, C) --> extelt (cmp Pred V0, V1), C
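  // For example (hypothetical operands):
  //   icmp sgt (extelt <4 x i32> %a, 1), (extelt <4 x i32> %b, 1)
  //   --> extelt (icmp sgt <4 x i32> %a, %b), 1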
  ++NumVecCmp;
  IRBuilder<> Builder(&I);
  CmpInst::Predicate Pred = cast<CmpInst>(&I)->getPredicate();
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecCmp = Builder.CreateCmp(Pred, V0, V1);
  Value *NewExt = Builder.CreateExtractElement(VecCmp, Ext0->getIndexOperand());
  I.replaceAllUsesWith(NewExt);
  NewExt->takeName(&I);
}

/// Try to reduce extract element costs by converting scalar binops to vector
/// binops followed by extract.
/// bo (ext0 V0, C), (ext1 V1, C)
static void foldExtExtBinop(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                            Instruction &I) {
  assert(isa<BinaryOperator>(&I) && "Expected a binary operator");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // bo (extelt V0, C), (extelt V1, C) --> extelt (bo V0, V1), C
  ++NumVecBO;
  IRBuilder<> Builder(&I);
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecBO =
      Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), V0, V1);

  // All IR flags are safe to back-propagate because any potential poison
  // created in unused vector elements is discarded by the extract.
  if (auto *VecBOInst = dyn_cast<Instruction>(VecBO))
    VecBOInst->copyIRFlags(&I);

  Value *NewExt = Builder.CreateExtractElement(VecBO, Ext0->getIndexOperand());
  I.replaceAllUsesWith(NewExt);
  NewExt->takeName(&I);
}

/// Match an instruction with extracted vector operands.
bool VectorCombine::foldExtractExtract(Instruction &I) {
  // It is not safe to transform things like div, urem, etc. because we may
  // create undefined behavior when executing those on unknown vector elements.
  if (!isSafeToSpeculativelyExecute(&I))
    return false;

  Instruction *I0, *I1;
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!match(&I, m_Cmp(Pred, m_Instruction(I0), m_Instruction(I1))) &&
      !match(&I, m_BinOp(m_Instruction(I0), m_Instruction(I1))))
    return false;

  Value *V0, *V1;
  uint64_t C0, C1;
  if (!match(I0, m_ExtractElt(m_Value(V0), m_ConstantInt(C0))) ||
      !match(I1, m_ExtractElt(m_Value(V1), m_ConstantInt(C1))) ||
      V0->getType() != V1->getType())
    return false;

  // If the scalar value 'I' is going to be re-inserted into a vector, then try
  // to create an extract to that same element. The extract/insert can be
  // reduced to a "select shuffle".
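  // For example, if the only use of 'I' is (hypothetically)
  //   insertelement <4 x float> %v, float %I, i32 3
  // then extracting from lane 3 of both source vectors lets the later
  // extract/insert pair fold to a select shuffle.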
  // TODO: If we add a larger pattern match that starts from an insert, this
  //       probably becomes unnecessary.
  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  uint64_t InsertIndex = std::numeric_limits<uint64_t>::max();
  if (I.hasOneUse())
    match(I.user_back(),
          m_InsertElt(m_Value(), m_Value(), m_ConstantInt(InsertIndex)));

  ExtractElementInst *ExtractToChange;
  if (isExtractExtractCheap(Ext0, Ext1, I.getOpcode(), ExtractToChange,
                            InsertIndex))
    return false;

  if (ExtractToChange) {
    unsigned CheapExtractIdx = ExtractToChange == Ext0 ? C1 : C0;
    ExtractElementInst *NewExtract =
        translateExtract(ExtractToChange, CheapExtractIdx);
    if (!NewExtract)
      return false;
    if (ExtractToChange == Ext0)
      Ext0 = NewExtract;
    else
      Ext1 = NewExtract;
  }

  if (Pred != CmpInst::BAD_ICMP_PREDICATE)
    foldExtExtCmp(Ext0, Ext1, I);
  else
    foldExtExtBinop(Ext0, Ext1, I);

  return true;
}

/// If this is a bitcast of a shuffle, try to bitcast the source vector to the
/// destination type followed by shuffle. This can enable further transforms by
/// moving bitcasts or shuffles together.
bool VectorCombine::foldBitcastShuf(Instruction &I) {
  Value *V;
  ArrayRef<int> Mask;
  if (!match(&I, m_BitCast(
                     m_OneUse(m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))))))
    return false;

  // Disallow non-vector casts and length-changing shuffles.
  // TODO: We could allow any shuffle.
  auto *DestTy = dyn_cast<VectorType>(I.getType());
  auto *SrcTy = cast<VectorType>(V->getType());
  if (!DestTy || I.getOperand(0)->getType() != SrcTy)
    return false;

  // The new shuffle must not cost more than the old shuffle. The bitcast is
  // moved ahead of the shuffle, so assume that it has the same cost as before.
  if (TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, DestTy) >
      TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, SrcTy))
    return false;

  unsigned DestNumElts = DestTy->getNumElements();
  unsigned SrcNumElts = SrcTy->getNumElements();
  SmallVector<int, 16> NewMask;
  if (SrcNumElts <= DestNumElts) {
    // The bitcast is from wide to narrow/equal elements. The shuffle mask can
    // always be expanded to the equivalent form choosing narrower elements.
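    // For example (hypothetically bitcasting <2 x i64> to <4 x i32>), a source
    // mask of {1, 0} would be narrowed to {2, 3, 0, 1}.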
    assert(DestNumElts % SrcNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = DestNumElts / SrcNumElts;
    narrowShuffleMaskElts(ScaleFactor, Mask, NewMask);
  } else {
    // The bitcast is from narrow elements to wide elements. The shuffle mask
    // must choose consecutive elements to allow casting first.
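    // For example (hypothetically bitcasting <4 x i32> to <2 x i64>), a source
    // mask of {2, 3, 0, 1} widens to {1, 0}, but a mask such as {1, 2, 0, 3}
    // does not select whole wide elements, so we bail out below.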
    assert(SrcNumElts % DestNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = SrcNumElts / DestNumElts;
    if (!widenShuffleMaskElts(ScaleFactor, Mask, NewMask))
      return false;
  }
  // bitcast (shuf V, MaskC) --> shuf (bitcast V), MaskC'
  ++NumShufOfBitcast;
  IRBuilder<> Builder(&I);
  Value *CastV = Builder.CreateBitCast(V, DestTy);
  Value *Shuf =
      Builder.CreateShuffleVector(CastV, UndefValue::get(DestTy), NewMask);
  I.replaceAllUsesWith(Shuf);
  return true;
}

/// Match a vector binop or compare instruction with at least one inserted
/// scalar operand and convert to scalar binop/cmp followed by insertelement.
bool VectorCombine::scalarizeBinopOrCmp(Instruction &I) {
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  Value *Ins0, *Ins1;
  if (!match(&I, m_BinOp(m_Value(Ins0), m_Value(Ins1))) &&
      !match(&I, m_Cmp(Pred, m_Value(Ins0), m_Value(Ins1))))
    return false;

  // Do not convert the vector condition of a vector select into a scalar
  // condition. That may cause problems for codegen because of differences in
  // boolean formats and register-file transfers.
  // TODO: Can we account for that in the cost model?
  bool IsCmp = Pred != CmpInst::Predicate::BAD_ICMP_PREDICATE;
  if (IsCmp)
    for (User *U : I.users())
      if (match(U, m_Select(m_Specific(&I), m_Value(), m_Value())))
        return false;

  // Match against one or both scalar values being inserted into constant
  // vectors:
  // vec_op VecC0, (inselt VecC1, V1, Index)
  // vec_op (inselt VecC0, V0, Index), VecC1
  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index)
  // TODO: Deal with mismatched index constants and variable indexes?
  Constant *VecC0 = nullptr, *VecC1 = nullptr;
  Value *V0 = nullptr, *V1 = nullptr;
  uint64_t Index0 = 0, Index1 = 0;
  if (!match(Ins0, m_InsertElt(m_Constant(VecC0), m_Value(V0),
                               m_ConstantInt(Index0))) &&
      !match(Ins0, m_Constant(VecC0)))
    return false;
  if (!match(Ins1, m_InsertElt(m_Constant(VecC1), m_Value(V1),
                               m_ConstantInt(Index1))) &&
      !match(Ins1, m_Constant(VecC1)))
    return false;

  bool IsConst0 = !V0;
  bool IsConst1 = !V1;
  if (IsConst0 && IsConst1)
    return false;
  if (!IsConst0 && !IsConst1 && Index0 != Index1)
    return false;

  // Bail out for the single-insertion case if the inserted scalar is a load.
  // TODO: Handle this once getVectorInstrCost can cost for load/stores.
  auto *I0 = dyn_cast_or_null<Instruction>(V0);
  auto *I1 = dyn_cast_or_null<Instruction>(V1);
  if ((IsConst0 && I1 && I1->mayReadFromMemory()) ||
      (IsConst1 && I0 && I0->mayReadFromMemory()))
    return false;

  uint64_t Index = IsConst0 ? Index1 : Index0;
  Type *ScalarTy = IsConst0 ? V1->getType() : V0->getType();
  Type *VecTy = I.getType();
  assert(VecTy->isVectorTy() &&
         (IsConst0 || IsConst1 || V0->getType() == V1->getType()) &&
         (ScalarTy->isIntegerTy() || ScalarTy->isFloatingPointTy() ||
          ScalarTy->isPointerTy()) &&
         "Unexpected types for insert element into binop or cmp");

  unsigned Opcode = I.getOpcode();
  int ScalarOpCost, VectorOpCost;
  if (IsCmp) {
    ScalarOpCost = TTI.getCmpSelInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getCmpSelInstrCost(Opcode, VecTy);
  } else {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  }

  // Get cost estimate for the insert element. This cost will factor into
  // both sequences.
  int InsertCost =
      TTI.getVectorInstrCost(Instruction::InsertElement, VecTy, Index);
  int OldCost = (IsConst0 ? 0 : InsertCost) + (IsConst1 ? 0 : InsertCost) +
                VectorOpCost;
  int NewCost = ScalarOpCost + InsertCost +
                (IsConst0 ? 0 : !Ins0->hasOneUse() * InsertCost) +
                (IsConst1 ? 0 : !Ins1->hasOneUse() * InsertCost);

  // We want to scalarize unless the vector variant actually has lower cost.
  if (OldCost < NewCost)
    return false;

  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index) -->
  // inselt NewVecC, (scalar_op V0, V1), Index
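  // For example (hypothetical constants):
  //   add (inselt <0,1,2,3>, %x, 2), <4,4,4,4>
  //   --> inselt <4,5,6,7>, (add %x, 4), 2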
  if (IsCmp)
    ++NumScalarCmp;
  else
    ++NumScalarBO;

  // For constant cases, extract the scalar element; this should constant fold.
  IRBuilder<> Builder(&I);
  if (IsConst0)
    V0 = ConstantExpr::getExtractElement(VecC0, Builder.getInt64(Index));
  if (IsConst1)
    V1 = ConstantExpr::getExtractElement(VecC1, Builder.getInt64(Index));

  Value *Scalar =
      IsCmp ? Builder.CreateCmp(Pred, V0, V1)
            : Builder.CreateBinOp((Instruction::BinaryOps)Opcode, V0, V1);

  Scalar->setName(I.getName() + ".scalar");

  // All IR flags are safe to back-propagate. There is no potential for extra
  // poison to be created by the scalar instruction.
  if (auto *ScalarInst = dyn_cast<Instruction>(Scalar))
    ScalarInst->copyIRFlags(&I);

  // Fold the vector constants in the original vectors into a new base vector.
  Constant *NewVecC = IsCmp ? ConstantExpr::getCompare(Pred, VecC0, VecC1)
                            : ConstantExpr::get(Opcode, VecC0, VecC1);
  Value *Insert = Builder.CreateInsertElement(NewVecC, Scalar, Index);
  I.replaceAllUsesWith(Insert);
  Insert->takeName(&I);
  return true;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
bool VectorCombine::run() {
  if (DisableVectorCombine)
    return false;

  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Do not delete instructions here; that would invalidate the iterator.
    // Walk the block forwards to enable simple iterative chains of transforms.
    // TODO: It could be more efficient to remove dead instructions
    //       iteratively in this loop rather than waiting until the end.
    for (Instruction &I : BB) {
      if (isa<DbgInfoIntrinsic>(I))
        continue;
      MadeChange |= foldExtractExtract(I);
      MadeChange |= foldBitcastShuf(I);
      MadeChange |= scalarizeBinopOrCmp(I);
    }
  }

  // We're done with transforms, so remove dead instructions.
  if (MadeChange)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB);

  return MadeChange;
}

// Pass manager boilerplate below here.

namespace {
class VectorCombineLegacyPass : public FunctionPass {
public:
  static char ID;
  VectorCombineLegacyPass() : FunctionPass(ID) {
    initializeVectorCombineLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    FunctionPass::getAnalysisUsage(AU);
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    VectorCombine Combiner(F, TTI, DT);
    return Combiner.run();
  }
};
} // namespace

char VectorCombineLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(VectorCombineLegacyPass, "vector-combine",
                      "Optimize scalar/vector ops", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(VectorCombineLegacyPass, "vector-combine",
                    "Optimize scalar/vector ops", false, false)
Pass *llvm::createVectorCombinePass() {
  return new VectorCombineLegacyPass();
}

PreservedAnalyses VectorCombinePass::run(Function &F,
                                         FunctionAnalysisManager &FAM) {
  TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
  DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  VectorCombine Combiner(F, TTI, DT);
  if (!Combiner.run())
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<AAManager>();
  PA.preserve<BasicAA>();
  return PA;
}