//===------- VectorCombine.cpp - Optimize partial vector operations -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass optimizes scalar/vector interactions using target cost models. The
// transforms implemented here may not fit in traditional loop-based or SLP
// vectorization passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/VectorCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "vector-combine"
STATISTIC(NumVecLoad, "Number of vector loads formed");
STATISTIC(NumVecCmp, "Number of vector compares formed");
STATISTIC(NumVecBO, "Number of vector binops formed");
STATISTIC(NumVecCmpBO, "Number of vector compare + binop formed");
STATISTIC(NumShufOfBitcast, "Number of shuffles moved after bitcast");
STATISTIC(NumScalarBO, "Number of scalar binops formed");
STATISTIC(NumScalarCmp, "Number of scalar compares formed");

static cl::opt<bool> DisableVectorCombine(
    "disable-vector-combine", cl::init(false), cl::Hidden,
    cl::desc("Disable all vector combine transforms"));

static cl::opt<bool> DisableBinopExtractShuffle(
    "disable-binop-extract-shuffle", cl::init(false), cl::Hidden,
    cl::desc("Disable binop extract to shuffle transforms"));

static const unsigned InvalidIndex = std::numeric_limits<unsigned>::max();

namespace {
class VectorCombine {
public:
  VectorCombine(Function &F, const TargetTransformInfo &TTI,
                const DominatorTree &DT)
      : F(F), Builder(F.getContext()), TTI(TTI), DT(DT) {}

  bool run();

private:
  Function &F;
  IRBuilder<> Builder;
  const TargetTransformInfo &TTI;
  const DominatorTree &DT;

  bool vectorizeLoadInsert(Instruction &I);
  ExtractElementInst *getShuffleExtract(ExtractElementInst *Ext0,
                                        ExtractElementInst *Ext1,
                                        unsigned PreferredExtractIndex) const;
  bool isExtractExtractCheap(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                             unsigned Opcode,
                             ExtractElementInst *&ConvertToShuffle,
                             unsigned PreferredExtractIndex);
  void foldExtExtCmp(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                     Instruction &I);
  void foldExtExtBinop(ExtractElementInst *Ext0, ExtractElementInst *Ext1,
                       Instruction &I);
  bool foldExtractExtract(Instruction &I);
  bool foldBitcastShuf(Instruction &I);
  bool scalarizeBinopOrCmp(Instruction &I);
  bool foldExtractedCmps(Instruction &I);
};
} // namespace

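/// Replace all uses of \p Old with \p New, and give \p New the name of \p Old
/// so the rewritten value keeps the original value's identity in the IR.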
static void replaceValue(Value &Old, Value &New) {
  Old.replaceAllUsesWith(&New);
  New.takeName(&Old);
}

bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
  // Match insert into fixed vector of scalar value.
  // TODO: Handle non-zero insert index.
  auto *Ty = dyn_cast<FixedVectorType>(I.getType());
  Value *Scalar;
  if (!Ty || !match(&I, m_InsertElt(m_Undef(), m_Value(Scalar), m_ZeroInt())) ||
      !Scalar->hasOneUse())
    return false;

  // Optionally match an extract from another vector.
  Value *X;
  bool HasExtract = match(Scalar, m_ExtractElt(m_Value(X), m_ZeroInt()));
  if (!HasExtract)
    X = Scalar;

  // Match source value as load of scalar or vector.
  // Do not vectorize scalar load (widening) if atomic/volatile or under
  // asan/hwasan/memtag/tsan. The widened load may load data from dirty regions
  // or create data races non-existent in the source.
  auto *Load = dyn_cast<LoadInst>(X);
  if (!Load || !Load->isSimple() || !Load->hasOneUse() ||
      Load->getFunction()->hasFnAttribute(Attribute::SanitizeMemTag) ||
      mustSuppressSpeculation(*Load))
    return false;

  const DataLayout &DL = I.getModule()->getDataLayout();
  Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
  assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");

  // If original AS != Load's AS, we can't bitcast the original pointer and have
  // to use Load's operand instead. Ideally we would want to strip pointer casts
  // without changing AS, but there's no API to do that ATM.
  unsigned AS = Load->getPointerAddressSpace();
  if (AS != SrcPtr->getType()->getPointerAddressSpace())
    SrcPtr = Load->getPointerOperand();

  // We are potentially transforming byte-sized (8-bit) memory accesses, so make
  // sure we have all of our type-based constraints in place for this target.
  Type *ScalarTy = Scalar->getType();
  uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits();
  unsigned MinVectorSize = TTI.getMinVectorRegisterBitWidth();
  if (!ScalarSize || !MinVectorSize || MinVectorSize % ScalarSize != 0 ||
      ScalarSize % 8 != 0)
    return false;

  // Check safety of replacing the scalar load with a larger vector load.
  // We use minimal alignment (maximum flexibility) because we only care about
  // the dereferenceable region. When calculating cost and creating a new op,
  // we may use a larger value based on alignment attributes.
  unsigned MinVecNumElts = MinVectorSize / ScalarSize;
  auto *MinVecTy = VectorType::get(ScalarTy, MinVecNumElts, false);
  unsigned OffsetEltIndex = 0;
  Align Alignment = Load->getAlign();
  if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT)) {
    // It is not safe to load directly from the pointer, but we can still peek
    // through gep offsets and check if it is safe to load from a base address
    // with updated alignment. If it is, we can shuffle the element(s) into
    // place after loading.
    unsigned OffsetBitWidth = DL.getIndexTypeSizeInBits(SrcPtr->getType());
    APInt Offset(OffsetBitWidth, 0);
    SrcPtr = SrcPtr->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

    // We want to shuffle the result down from a high element of a vector, so
    // the offset must be positive.
    if (Offset.isNegative())
      return false;

    // The offset must be a multiple of the scalar element size, so that the
    // offset corresponds to a whole element index in the loaded vector.
    uint64_t ScalarSizeInBytes = ScalarSize / 8;
    if (Offset.urem(ScalarSizeInBytes) != 0)
      return false;

    // If we load MinVecNumElts, will our target element still be loaded?
    OffsetEltIndex = Offset.udiv(ScalarSizeInBytes).getZExtValue();
    if (OffsetEltIndex >= MinVecNumElts)
      return false;

    if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &DT))
      return false;

    // Update alignment with offset value. Note that the offset could be negated
    // to more accurately represent "(new) SrcPtr - Offset = (old) SrcPtr", but
    // negation does not change the result of the alignment calculation.
    Alignment = commonAlignment(Alignment, Offset.getZExtValue());
  }

  // Original pattern: insertelt undef, load [free casts of] PtrOp, 0
  // Use the greater of the alignment on the load or its source pointer.
  Alignment = std::max(SrcPtr->getPointerAlignment(DL), Alignment);
  Type *LoadTy = Load->getType();
  int OldCost = TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS);
  APInt DemandedElts = APInt::getOneBitSet(MinVecNumElts, 0);
  OldCost += TTI.getScalarizationOverhead(MinVecTy, DemandedElts,
                                          /* Insert */ true, HasExtract);

  // New pattern: load VecPtr
  int NewCost = TTI.getMemoryOpCost(Instruction::Load, MinVecTy, Alignment, AS);
  // Optionally, we are shuffling the loaded vector element(s) into place.
  if (OffsetEltIndex)
    NewCost += TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, MinVecTy);

  // We can aggressively convert to the vector form because the backend can
  // invert this transform if it does not result in a performance win.
  if (OldCost < NewCost)
    return false;

  // It is safe and potentially profitable to load a vector directly:
  // inselt undef, load Scalar, 0 --> load VecPtr
  IRBuilder<> Builder(Load);
  Value *CastedPtr = Builder.CreateBitCast(SrcPtr, MinVecTy->getPointerTo(AS));
  Value *VecLd = Builder.CreateAlignedLoad(MinVecTy, CastedPtr, Alignment);

  // Set everything but element 0 to undef to prevent poison from propagating
  // from the extra loaded memory. This will also optionally shrink/grow the
  // vector from the loaded size to the output size.
  // We assume this operation has no cost in codegen if there was no offset.
  // Note that we could use freeze to avoid poison problems, but then we might
  // still need a shuffle to change the vector size.
  unsigned OutputNumElts = Ty->getNumElements();
  SmallVector<int, 16> Mask(OutputNumElts, UndefMaskElem);
  assert(OffsetEltIndex < MinVecNumElts && "Address offset too big");
  Mask[0] = OffsetEltIndex;
  VecLd = Builder.CreateShuffleVector(VecLd, Mask);

  replaceValue(I, *VecLd);
  ++NumVecLoad;
  return true;
}

/// Determine which, if any, of the inputs should be replaced by a shuffle
/// followed by extract from a different index.
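/// For example, if the extract costs are equal, no index is preferred, and the
/// extracts read indexes 0 and 3 of the same vector type, the extract at index
/// 3 is returned: its source vector will be shuffled so that its element moves
/// to index 0, and the vector op's result is then extracted from index 0.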
ExtractElementInst *VectorCombine::getShuffleExtract(
    ExtractElementInst *Ext0, ExtractElementInst *Ext1,
    unsigned PreferredExtractIndex = InvalidIndex) const {
  assert(isa<ConstantInt>(Ext0->getIndexOperand()) &&
         isa<ConstantInt>(Ext1->getIndexOperand()) &&
         "Expected constant extract indexes");

  unsigned Index0 = cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue();
  unsigned Index1 = cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue();

  // If the extract indexes are identical, no shuffle is needed.
  if (Index0 == Index1)
    return nullptr;

  Type *VecTy = Ext0->getVectorOperand()->getType();
  assert(VecTy == Ext1->getVectorOperand()->getType() && "Need matching types");
  int Cost0 = TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
  int Cost1 = TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);

  // We are extracting from 2 different indexes, so one operand must be shuffled
  // before performing a vector operation and/or extract. The more expensive
  // extract will be replaced by a shuffle.
  if (Cost0 > Cost1)
    return Ext0;
  if (Cost1 > Cost0)
    return Ext1;

  // If the costs are equal and there is a preferred extract index, shuffle the
  // opposite operand.
  if (PreferredExtractIndex == Index0)
    return Ext1;
  if (PreferredExtractIndex == Index1)
    return Ext0;

  // Otherwise, replace the extract with the higher index.
  return Index0 > Index1 ? Ext0 : Ext1;
}

/// Compare the relative costs of 2 extracts followed by scalar operation vs.
/// vector operation(s) followed by extract. Return true if the existing
/// instructions are cheaper than a vector alternative. Otherwise, return false
/// and if one of the extracts should be transformed to a shufflevector, set
/// \p ConvertToShuffle to that extract instruction.
bool VectorCombine::isExtractExtractCheap(ExtractElementInst *Ext0,
                                          ExtractElementInst *Ext1,
                                          unsigned Opcode,
                                          ExtractElementInst *&ConvertToShuffle,
                                          unsigned PreferredExtractIndex) {
  assert(isa<ConstantInt>(Ext0->getOperand(1)) &&
         isa<ConstantInt>(Ext1->getOperand(1)) &&
         "Expected constant extract indexes");
  Type *ScalarTy = Ext0->getType();
  auto *VecTy = cast<VectorType>(Ext0->getOperand(0)->getType());
  int ScalarOpCost, VectorOpCost;

  // Get cost estimates for scalar and vector versions of the operation.
  bool IsBinOp = Instruction::isBinaryOp(Opcode);
  if (IsBinOp) {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  } else {
    assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
           "Expected a compare");
    ScalarOpCost = TTI.getCmpSelInstrCost(Opcode, ScalarTy,
                                          CmpInst::makeCmpResultType(ScalarTy));
    VectorOpCost = TTI.getCmpSelInstrCost(Opcode, VecTy,
                                          CmpInst::makeCmpResultType(VecTy));
  }

  // Get cost estimates for the extract elements. These costs will factor into
  // both sequences.
  unsigned Ext0Index = cast<ConstantInt>(Ext0->getOperand(1))->getZExtValue();
  unsigned Ext1Index = cast<ConstantInt>(Ext1->getOperand(1))->getZExtValue();

  int Extract0Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext0Index);
  int Extract1Cost =
      TTI.getVectorInstrCost(Instruction::ExtractElement, VecTy, Ext1Index);

  // A more expensive extract will always be replaced by a splat shuffle.
  // For example, if Ext0 is more expensive:
  // opcode (extelt V0, Ext0), (ext V1, Ext1) -->
  // extelt (opcode (splat V0, Ext0), V1), Ext1
  // TODO: Evaluate whether that always results in lowest cost. Alternatively,
  //       check the cost of creating a broadcast shuffle and shuffling both
  //       operands to element 0.
  int CheapExtractCost = std::min(Extract0Cost, Extract1Cost);

  // Extra uses of the extracts mean that we include those costs in the
  // vector total because those instructions will not be eliminated.
  int OldCost, NewCost;
  if (Ext0->getOperand(0) == Ext1->getOperand(0) && Ext0Index == Ext1Index) {
    // Handle a special case. If the 2 extracts are identical, adjust the
    // formulas to account for that. The extra use charge allows for either the
    // CSE'd pattern or an unoptimized form with identical values:
    // opcode (extelt V, C), (extelt V, C) --> extelt (opcode V, V), C
    bool HasUseTax = Ext0 == Ext1 ? !Ext0->hasNUses(2)
                                  : !Ext0->hasOneUse() || !Ext1->hasOneUse();
    OldCost = CheapExtractCost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost + HasUseTax * CheapExtractCost;
  } else {
    // Handle the general case. Each extract is actually a different value:
    // opcode (extelt V0, C0), (extelt V1, C1) --> extelt (opcode V0, V1), C
    OldCost = Extract0Cost + Extract1Cost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost +
              !Ext0->hasOneUse() * Extract0Cost +
              !Ext1->hasOneUse() * Extract1Cost;
  }

  ConvertToShuffle = getShuffleExtract(Ext0, Ext1, PreferredExtractIndex);
  if (ConvertToShuffle) {
    if (IsBinOp && DisableBinopExtractShuffle)
      return true;

    // If we are extracting from 2 different indexes, then one operand must be
    // shuffled before performing the vector operation. The shuffle mask is
    // undefined except for 1 lane that is being translated to the remaining
    // extraction lane. Therefore, it is a splat shuffle. Ex:
    // ShufMask = { undef, undef, 0, undef }
    // TODO: The cost model has an option for a "broadcast" shuffle
    //       (splat-from-element-0), but no option for a more general splat.
    NewCost +=
        TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
  }

  // Aggressively form a vector op if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  return OldCost < NewCost;
}

/// Create a shuffle that translates (shifts) 1 element from the input vector
/// to a new element location.
static Value *createShiftShuffle(Value *Vec, unsigned OldIndex,
                                 unsigned NewIndex, IRBuilder<> &Builder) {
  // The shuffle mask is undefined except for 1 lane that is being translated
  // to the new element index. Example for OldIndex == 2 and NewIndex == 0:
  // ShufMask = { 2, undef, undef, undef }
  auto *VecTy = cast<FixedVectorType>(Vec->getType());
  SmallVector<int, 32> ShufMask(VecTy->getNumElements(), UndefMaskElem);
  ShufMask[NewIndex] = OldIndex;
  return Builder.CreateShuffleVector(Vec, ShufMask, "shift");
}

/// Given an extract element instruction with constant index operand, shuffle
/// the source vector (shift the scalar element) to a NewIndex for extraction.
/// Return null if the input can be constant folded, so that we are not creating
/// unnecessary instructions.
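/// A minimal illustration with NewIndex == 0:
///   %e = extractelement <4 x i32> %x, i32 2
/// -->
///   %shift = shufflevector <4 x i32> %x, <4 x i32> undef,
///                          <4 x i32> <i32 2, i32 undef, i32 undef, i32 undef>
///   %e = extractelement <4 x i32> %shift, i32 0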
static ExtractElementInst *translateExtract(ExtractElementInst *ExtElt,
                                            unsigned NewIndex,
                                            IRBuilder<> &Builder) {
  // If the extract can be constant-folded, this code is unsimplified. Defer
  // to other passes to handle that.
  Value *X = ExtElt->getVectorOperand();
  Value *C = ExtElt->getIndexOperand();
  assert(isa<ConstantInt>(C) && "Expected a constant index operand");
  if (isa<Constant>(X))
    return nullptr;

  Value *Shuf = createShiftShuffle(X, cast<ConstantInt>(C)->getZExtValue(),
                                   NewIndex, Builder);
  return cast<ExtractElementInst>(Builder.CreateExtractElement(Shuf, NewIndex));
}

/// Try to reduce extract element costs by converting scalar compares to vector
/// compares followed by extract.
/// cmp (ext0 V0, C), (ext1 V1, C)
void VectorCombine::foldExtExtCmp(ExtractElementInst *Ext0,
                                  ExtractElementInst *Ext1, Instruction &I) {
  assert(isa<CmpInst>(&I) && "Expected a compare");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // cmp Pred (extelt V0, C), (extelt V1, C) --> extelt (cmp Pred V0, V1), C
  ++NumVecCmp;
  CmpInst::Predicate Pred = cast<CmpInst>(&I)->getPredicate();
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecCmp = Builder.CreateCmp(Pred, V0, V1);
  Value *NewExt = Builder.CreateExtractElement(VecCmp, Ext0->getIndexOperand());
  replaceValue(I, *NewExt);
}

/// Try to reduce extract element costs by converting scalar binops to vector
/// binops followed by extract.
/// bo (ext0 V0, C), (ext1 V1, C)
void VectorCombine::foldExtExtBinop(ExtractElementInst *Ext0,
                                    ExtractElementInst *Ext1, Instruction &I) {
  assert(isa<BinaryOperator>(&I) && "Expected a binary operator");
  assert(cast<ConstantInt>(Ext0->getIndexOperand())->getZExtValue() ==
             cast<ConstantInt>(Ext1->getIndexOperand())->getZExtValue() &&
         "Expected matching constant extract indexes");

  // bo (extelt V0, C), (extelt V1, C) --> extelt (bo V0, V1), C
  ++NumVecBO;
  Value *V0 = Ext0->getVectorOperand(), *V1 = Ext1->getVectorOperand();
  Value *VecBO =
      Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), V0, V1);

  // All IR flags are safe to back-propagate because any potential poison
  // created in unused vector elements is discarded by the extract.
  if (auto *VecBOInst = dyn_cast<Instruction>(VecBO))
    VecBOInst->copyIRFlags(&I);

  Value *NewExt = Builder.CreateExtractElement(VecBO, Ext0->getIndexOperand());
  replaceValue(I, *NewExt);
}

/// Match an instruction with extracted vector operands.
bool VectorCombine::foldExtractExtract(Instruction &I) {
  // It is not safe to transform things like div, urem, etc. because we may
  // create undefined behavior when executing those on unknown vector elements.
  if (!isSafeToSpeculativelyExecute(&I))
    return false;

  Instruction *I0, *I1;
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!match(&I, m_Cmp(Pred, m_Instruction(I0), m_Instruction(I1))) &&
      !match(&I, m_BinOp(m_Instruction(I0), m_Instruction(I1))))
    return false;

  Value *V0, *V1;
  uint64_t C0, C1;
  if (!match(I0, m_ExtractElt(m_Value(V0), m_ConstantInt(C0))) ||
      !match(I1, m_ExtractElt(m_Value(V1), m_ConstantInt(C1))) ||
      V0->getType() != V1->getType())
    return false;

  // If the scalar value 'I' is going to be re-inserted into a vector, then try
  // to create an extract to that same element. The extract/insert can be
  // reduced to a "select shuffle".
  // TODO: If we add a larger pattern match that starts from an insert, this
  //       probably becomes unnecessary.
  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  uint64_t InsertIndex = InvalidIndex;
  if (I.hasOneUse())
    match(I.user_back(),
          m_InsertElt(m_Value(), m_Value(), m_ConstantInt(InsertIndex)));

  ExtractElementInst *ExtractToChange;
  if (isExtractExtractCheap(Ext0, Ext1, I.getOpcode(), ExtractToChange,
                            InsertIndex))
    return false;

  if (ExtractToChange) {
    unsigned CheapExtractIdx = ExtractToChange == Ext0 ? C1 : C0;
    ExtractElementInst *NewExtract =
        translateExtract(ExtractToChange, CheapExtractIdx, Builder);
    if (!NewExtract)
      return false;
    if (ExtractToChange == Ext0)
      Ext0 = NewExtract;
    else
      Ext1 = NewExtract;
  }

  if (Pred != CmpInst::BAD_ICMP_PREDICATE)
    foldExtExtCmp(Ext0, Ext1, I);
  else
    foldExtExtBinop(Ext0, Ext1, I);

  return true;
}

/// If this is a bitcast of a shuffle, try to bitcast the source vector to the
/// destination type followed by shuffle. This can enable further transforms by
/// moving bitcasts or shuffles together.
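/// For example (illustrative), widening i32 shuffle elements to i64:
///   %s = shufflevector <4 x i32> %v, <4 x i32> undef,
///                      <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
///   %b = bitcast <4 x i32> %s to <2 x i64>
/// -->
///   %b = bitcast <4 x i32> %v to <2 x i64>
///   %s = shufflevector <2 x i64> %b, <2 x i64> undef,
///                      <2 x i32> <i32 1, i32 undef>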
bool VectorCombine::foldBitcastShuf(Instruction &I) {
  Value *V;
  ArrayRef<int> Mask;
  if (!match(&I, m_BitCast(
                     m_OneUse(m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))))))
    return false;

  // 1) Do not fold bitcast shuffle for scalable type. First, shuffle cost for
  // scalable type is unknown; Second, we cannot reason if the narrowed shuffle
  // mask for scalable type is a splat or not.
  // 2) Disallow non-vector casts and length-changing shuffles.
  // TODO: We could allow any shuffle.
  auto *DestTy = dyn_cast<FixedVectorType>(I.getType());
  auto *SrcTy = dyn_cast<FixedVectorType>(V->getType());
  if (!SrcTy || !DestTy || I.getOperand(0)->getType() != SrcTy)
    return false;

  // The new shuffle must not cost more than the old shuffle. The bitcast is
  // moved ahead of the shuffle, so assume that it has the same cost as before.
  if (TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, DestTy) >
      TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, SrcTy))
    return false;

  unsigned DestNumElts = DestTy->getNumElements();
  unsigned SrcNumElts = SrcTy->getNumElements();
  SmallVector<int, 16> NewMask;
  if (SrcNumElts <= DestNumElts) {
    // The bitcast is from wide to narrow/equal elements. The shuffle mask can
    // always be expanded to the equivalent form choosing narrower elements.
    assert(DestNumElts % SrcNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = DestNumElts / SrcNumElts;
    narrowShuffleMaskElts(ScaleFactor, Mask, NewMask);
  } else {
    // The bitcast is from narrow elements to wide elements. The shuffle mask
    // must choose consecutive elements to allow casting first.
    assert(SrcNumElts % DestNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = SrcNumElts / DestNumElts;
    if (!widenShuffleMaskElts(ScaleFactor, Mask, NewMask))
      return false;
  }
  // bitcast (shuf V, MaskC) --> shuf (bitcast V), MaskC'
  ++NumShufOfBitcast;
  Value *CastV = Builder.CreateBitCast(V, DestTy);
  Value *Shuf = Builder.CreateShuffleVector(CastV, NewMask);
  replaceValue(I, *Shuf);
  return true;
}

/// Match a vector binop or compare instruction with at least one inserted
/// scalar operand and convert to scalar binop/cmp followed by insertelement.
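/// For example (illustrative), scalarizing an add with one inserted operand:
///   %v = insertelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %x, i32 0
///   %r = add <4 x i32> %v, <i32 10, i32 10, i32 10, i32 10>
/// -->
///   %s = add i32 %x, 10
///   %r = insertelement <4 x i32> <i32 10, i32 11, i32 12, i32 13>, i32 %s,
///                      i32 0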
bool VectorCombine::scalarizeBinopOrCmp(Instruction &I) {
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  Value *Ins0, *Ins1;
  if (!match(&I, m_BinOp(m_Value(Ins0), m_Value(Ins1))) &&
      !match(&I, m_Cmp(Pred, m_Value(Ins0), m_Value(Ins1))))
    return false;

  // Do not convert the vector condition of a vector select into a scalar
  // condition. That may cause problems for codegen because of differences in
  // boolean formats and register-file transfers.
  // TODO: Can we account for that in the cost model?
  bool IsCmp = Pred != CmpInst::Predicate::BAD_ICMP_PREDICATE;
  if (IsCmp)
    for (User *U : I.users())
      if (match(U, m_Select(m_Specific(&I), m_Value(), m_Value())))
        return false;

  // Match against one or both scalar values being inserted into constant
  // vectors:
  // vec_op VecC0, (inselt VecC1, V1, Index)
  // vec_op (inselt VecC0, V0, Index), VecC1
  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index)
  // TODO: Deal with mismatched index constants and variable indexes?
  Constant *VecC0 = nullptr, *VecC1 = nullptr;
  Value *V0 = nullptr, *V1 = nullptr;
  uint64_t Index0 = 0, Index1 = 0;
  if (!match(Ins0, m_InsertElt(m_Constant(VecC0), m_Value(V0),
                               m_ConstantInt(Index0))) &&
      !match(Ins0, m_Constant(VecC0)))
    return false;
  if (!match(Ins1, m_InsertElt(m_Constant(VecC1), m_Value(V1),
                               m_ConstantInt(Index1))) &&
      !match(Ins1, m_Constant(VecC1)))
    return false;

  bool IsConst0 = !V0;
  bool IsConst1 = !V1;
  if (IsConst0 && IsConst1)
    return false;
  if (!IsConst0 && !IsConst1 && Index0 != Index1)
    return false;

  // Bail for single insertion if it is a load.
  // TODO: Handle this once getVectorInstrCost can cost for load/stores.
  auto *I0 = dyn_cast_or_null<Instruction>(V0);
  auto *I1 = dyn_cast_or_null<Instruction>(V1);
  if ((IsConst0 && I1 && I1->mayReadFromMemory()) ||
      (IsConst1 && I0 && I0->mayReadFromMemory()))
    return false;

  uint64_t Index = IsConst0 ? Index1 : Index0;
  Type *ScalarTy = IsConst0 ? V1->getType() : V0->getType();
  Type *VecTy = I.getType();
  assert(VecTy->isVectorTy() &&
         (IsConst0 || IsConst1 || V0->getType() == V1->getType()) &&
         (ScalarTy->isIntegerTy() || ScalarTy->isFloatingPointTy() ||
          ScalarTy->isPointerTy()) &&
         "Unexpected types for insert element into binop or cmp");

  unsigned Opcode = I.getOpcode();
  int ScalarOpCost, VectorOpCost;
  if (IsCmp) {
    ScalarOpCost = TTI.getCmpSelInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getCmpSelInstrCost(Opcode, VecTy);
  } else {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  }

  // Get cost estimate for the insert element. This cost will factor into
  // both sequences.
  int InsertCost =
      TTI.getVectorInstrCost(Instruction::InsertElement, VecTy, Index);
  int OldCost = (IsConst0 ? 0 : InsertCost) + (IsConst1 ? 0 : InsertCost) +
                VectorOpCost;
  int NewCost = ScalarOpCost + InsertCost +
                (IsConst0 ? 0 : !Ins0->hasOneUse() * InsertCost) +
                (IsConst1 ? 0 : !Ins1->hasOneUse() * InsertCost);

  // We want to scalarize unless the vector variant actually has lower cost.
  if (OldCost < NewCost)
    return false;

  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index) -->
  // inselt NewVecC, (scalar_op V0, V1), Index
  if (IsCmp)
    ++NumScalarCmp;
  else
    ++NumScalarBO;

  // For constant cases, extract the scalar element, this should constant fold.
  if (IsConst0)
    V0 = ConstantExpr::getExtractElement(VecC0, Builder.getInt64(Index));
  if (IsConst1)
    V1 = ConstantExpr::getExtractElement(VecC1, Builder.getInt64(Index));

  Value *Scalar =
      IsCmp ? Builder.CreateCmp(Pred, V0, V1)
            : Builder.CreateBinOp((Instruction::BinaryOps)Opcode, V0, V1);

  Scalar->setName(I.getName() + ".scalar");

  // All IR flags are safe to back-propagate. There is no potential for extra
  // poison to be created by the scalar instruction.
  if (auto *ScalarInst = dyn_cast<Instruction>(Scalar))
    ScalarInst->copyIRFlags(&I);

  // Fold the vector constants in the original vectors into a new base vector.
  Constant *NewVecC = IsCmp ? ConstantExpr::getCompare(Pred, VecC0, VecC1)
                            : ConstantExpr::get(Opcode, VecC0, VecC1);
  Value *Insert = Builder.CreateInsertElement(NewVecC, Scalar, Index);
  replaceValue(I, *Insert);
  return true;
}

/// Try to combine a scalar binop + 2 scalar compares of extracted elements of
/// a vector into vector operations followed by extract. Note: The SLP pass
/// may miss this pattern because of implementation problems.
bool VectorCombine::foldExtractedCmps(Instruction &I) {
  // We are looking for a scalar binop of booleans.
  // binop i1 (cmp Pred I0, C0), (cmp Pred I1, C1)
  if (!I.isBinaryOp() || !I.getType()->isIntegerTy(1))
    return false;

  // The compare predicates should match, and each compare should have a
  // constant operand.
  // TODO: Relax the one-use constraints.
  Value *B0 = I.getOperand(0), *B1 = I.getOperand(1);
  Instruction *I0, *I1;
  Constant *C0, *C1;
  CmpInst::Predicate P0, P1;
  if (!match(B0, m_OneUse(m_Cmp(P0, m_Instruction(I0), m_Constant(C0)))) ||
      !match(B1, m_OneUse(m_Cmp(P1, m_Instruction(I1), m_Constant(C1)))) ||
      P0 != P1)
    return false;

  // The compare operands must be extracts of the same vector with constant
  // extract indexes.
  // TODO: Relax the one-use constraints.
  Value *X;
  uint64_t Index0, Index1;
  if (!match(I0, m_OneUse(m_ExtractElt(m_Value(X), m_ConstantInt(Index0)))) ||
      !match(I1, m_OneUse(m_ExtractElt(m_Specific(X), m_ConstantInt(Index1)))))
    return false;

  auto *Ext0 = cast<ExtractElementInst>(I0);
  auto *Ext1 = cast<ExtractElementInst>(I1);
  ExtractElementInst *ConvertToShuf = getShuffleExtract(Ext0, Ext1);
  if (!ConvertToShuf)
    return false;

  // The original scalar pattern is:
  // binop i1 (cmp Pred (ext X, Index0), C0), (cmp Pred (ext X, Index1), C1)
  CmpInst::Predicate Pred = P0;
  unsigned CmpOpcode = CmpInst::isFPPredicate(Pred) ? Instruction::FCmp
                                                    : Instruction::ICmp;
  auto *VecTy = dyn_cast<FixedVectorType>(X->getType());
  if (!VecTy)
    return false;

  int OldCost = TTI.getVectorInstrCost(Ext0->getOpcode(), VecTy, Index0);
  OldCost += TTI.getVectorInstrCost(Ext1->getOpcode(), VecTy, Index1);
  OldCost += TTI.getCmpSelInstrCost(CmpOpcode, I0->getType()) * 2;
  OldCost += TTI.getArithmeticInstrCost(I.getOpcode(), I.getType());

  // The proposed vector pattern is:
  // vcmp = cmp Pred X, VecC
  // ext (binop vNi1 vcmp, (shuffle vcmp, Index1)), Index0
  int CheapIndex = ConvertToShuf == Ext0 ? Index1 : Index0;
  int ExpensiveIndex = ConvertToShuf == Ext0 ? Index0 : Index1;
  auto *CmpTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(X->getType()));
  int NewCost = TTI.getCmpSelInstrCost(CmpOpcode, X->getType());
  NewCost +=
      TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, CmpTy);
  NewCost += TTI.getArithmeticInstrCost(I.getOpcode(), CmpTy);
  NewCost += TTI.getVectorInstrCost(Ext0->getOpcode(), CmpTy, CheapIndex);

  // Aggressively form vector ops if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  if (OldCost < NewCost)
    return false;

  // Create a vector constant from the 2 scalar constants.
  SmallVector<Constant *, 32> CmpC(VecTy->getNumElements(),
                                   UndefValue::get(VecTy->getElementType()));
  CmpC[Index0] = C0;
  CmpC[Index1] = C1;
  Value *VCmp = Builder.CreateCmp(Pred, X, ConstantVector::get(CmpC));

  Value *Shuf = createShiftShuffle(VCmp, ExpensiveIndex, CheapIndex, Builder);
  Value *VecLogic = Builder.CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                        VCmp, Shuf);
  Value *NewExt = Builder.CreateExtractElement(VecLogic, CheapIndex);
  replaceValue(I, *NewExt);
  ++NumVecCmpBO;
  return true;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
bool VectorCombine::run() {
  if (DisableVectorCombine)
    return false;

  // Don't attempt vectorization if the target does not support vectors.
  if (!TTI.getNumberOfRegisters(TTI.getRegisterClassForType(/*Vector*/ true)))
    return false;

  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Do not delete instructions under here and invalidate the iterator.
    // Walk the block forwards to enable simple iterative chains of transforms.
    // TODO: It could be more efficient to remove dead instructions
    //       iteratively in this loop rather than waiting until the end.
    for (Instruction &I : BB) {
      if (isa<DbgInfoIntrinsic>(I))
        continue;
      Builder.SetInsertPoint(&I);
      MadeChange |= vectorizeLoadInsert(I);
      MadeChange |= foldExtractExtract(I);
      MadeChange |= foldBitcastShuf(I);
      MadeChange |= scalarizeBinopOrCmp(I);
      MadeChange |= foldExtractedCmps(I);
    }
  }

  // We're done with transforms, so remove dead instructions.
  if (MadeChange)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB);

  return MadeChange;
}

// Pass manager boilerplate below here.

namespace {
class VectorCombineLegacyPass : public FunctionPass {
public:
  static char ID;
  VectorCombineLegacyPass() : FunctionPass(ID) {
    initializeVectorCombineLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    FunctionPass::getAnalysisUsage(AU);
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    VectorCombine Combiner(F, TTI, DT);
    return Combiner.run();
  }
};
} // namespace

char VectorCombineLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(VectorCombineLegacyPass, "vector-combine",
                      "Optimize scalar/vector ops", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(VectorCombineLegacyPass, "vector-combine",
                    "Optimize scalar/vector ops", false, false)
Pass *llvm::createVectorCombinePass() {
  return new VectorCombineLegacyPass();
}

PreservedAnalyses VectorCombinePass::run(Function &F,
                                         FunctionAnalysisManager &FAM) {
  TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
  DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  VectorCombine Combiner(F, TTI, DT);
  if (!Combiner.run())
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<AAManager>();
  PA.preserve<BasicAA>();
  return PA;
}