//===------- VectorCombine.cpp - Optimize partial vector operations -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass optimizes scalar/vector interactions using target cost models. The
// transforms implemented here may not fit in traditional loop-based or SLP
// vectorization passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/VectorCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "vector-combine"

STATISTIC(NumVecCmp, "Number of vector compares formed");
STATISTIC(NumVecBO, "Number of vector binops formed");
STATISTIC(NumShufOfBitcast, "Number of shuffles moved after bitcast");
STATISTIC(NumScalarBO, "Number of scalar binops formed");
STATISTIC(NumScalarCmp, "Number of scalar compares formed");

static cl::opt<bool> DisableVectorCombine(
    "disable-vector-combine", cl::init(false), cl::Hidden,
    cl::desc("Disable all vector combine transforms"));

static cl::opt<bool> DisableBinopExtractShuffle(
    "disable-binop-extract-shuffle", cl::init(false), cl::Hidden,
    cl::desc("Disable binop extract to shuffle transforms"));

/// Compare the relative costs of 2 extracts followed by a scalar operation vs.
/// a vector operation (and possible shuffle) followed by an extract. Return
/// true if the existing instructions are cheaper than the vector alternative.
/// Otherwise, return false, and if one of the extracts should be transformed
/// into a shufflevector, set \p ConvertToShuffle to that extract instruction.
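/// For example (illustrative IR; the decision depends on the target's cost
/// model), the existing sequence:
///   %e0 = extractelement <4 x i32> %x, i32 0
///   %e1 = extractelement <4 x i32> %y, i32 0
///   %r  = add i32 %e0, %e1
/// is weighed against the vector alternative:
///   %v = add <4 x i32> %x, %y
///   %r = extractelement <4 x i32> %v, i32 0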
static bool isExtractExtractCheap(Instruction *Ext0, Instruction *Ext1,
                                  unsigned Opcode,
                                  const TargetTransformInfo &TTI,
                                  Instruction *&ConvertToShuffle,
                                  unsigned PreferredExtractIndex) {
  assert(isa<ConstantInt>(Ext0->getOperand(1)) &&
         isa<ConstantInt>(Ext1->getOperand(1)) &&
         "Expected constant extract indexes");
  Type *ScalarTy = Ext0->getType();
  auto *VecTy = cast<VectorType>(Ext0->getOperand(0)->getType());
  int ScalarOpCost, VectorOpCost;

  // Get cost estimates for scalar and vector versions of the operation.
  bool IsBinOp = Instruction::isBinaryOp(Opcode);
  if (IsBinOp) {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  } else {
    assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
           "Expected a compare");
    ScalarOpCost = TTI.getCmpSelInstrCost(Opcode, ScalarTy,
                                          CmpInst::makeCmpResultType(ScalarTy));
    VectorOpCost = TTI.getCmpSelInstrCost(Opcode, VecTy,
                                          CmpInst::makeCmpResultType(VecTy));
  }

  // Get cost estimates for the extract elements. These costs will factor into
  // both sequences.
  unsigned Ext0Index = cast<ConstantInt>(Ext0->getOperand(1))->getZExtValue();
  unsigned Ext1Index = cast<ConstantInt>(Ext1->getOperand(1))->getZExtValue();

  int Extract0Cost = TTI.getVectorInstrCost(Instruction::ExtractElement,
                                            VecTy, Ext0Index);
  int Extract1Cost = TTI.getVectorInstrCost(Instruction::ExtractElement,
                                            VecTy, Ext1Index);

  // A more expensive extract will always be replaced by a splat shuffle.
  // For example, if Ext0 is more expensive:
  // opcode (extelt V0, Ext0), (extelt V1, Ext1) -->
  // extelt (opcode (splat V0, Ext0), V1), Ext1
  // TODO: Evaluate whether that always results in lowest cost. Alternatively,
  //       check the cost of creating a broadcast shuffle and shuffling both
  //       operands to element 0.
  int CheapExtractCost = std::min(Extract0Cost, Extract1Cost);

  // Extra uses of the extracts mean that we include those costs in the
  // vector total because those instructions will not be eliminated.
  int OldCost, NewCost;
  if (Ext0->getOperand(0) == Ext1->getOperand(0) && Ext0Index == Ext1Index) {
    // Handle a special case. If the 2 extracts are identical, adjust the
    // formulas to account for that. The extra use charge allows for either the
    // CSE'd pattern or an unoptimized form with identical values:
    // opcode (extelt V, C), (extelt V, C) --> extelt (opcode V, V), C
    bool HasUseTax = Ext0 == Ext1 ? !Ext0->hasNUses(2)
                                  : !Ext0->hasOneUse() || !Ext1->hasOneUse();
    OldCost = CheapExtractCost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost + HasUseTax * CheapExtractCost;
  } else {
    // Handle the general case. Each extract is actually a different value:
    // opcode (extelt V0, C0), (extelt V1, C1) --> extelt (opcode V0, V1), C
    OldCost = Extract0Cost + Extract1Cost + ScalarOpCost;
    NewCost = VectorOpCost + CheapExtractCost +
              !Ext0->hasOneUse() * Extract0Cost +
              !Ext1->hasOneUse() * Extract1Cost;
  }
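
  // Worked example with illustrative (target-dependent) costs: if each extract
  // costs 1, the scalar op costs 1, and the vector op costs 1, then two
  // single-use extracts from different lanes give OldCost = 1 + 1 + 1 = 3 and
  // NewCost = 1 + 1 = 2, before any shuffle cost is added below.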

  if (Ext0Index == Ext1Index) {
    // If the extract indexes are identical, no shuffle is needed.
    ConvertToShuffle = nullptr;
  } else {
    if (IsBinOp && DisableBinopExtractShuffle)
      return true;

    // If we are extracting from 2 different indexes, then one operand must be
    // shuffled before performing the vector operation. The shuffle mask is
    // undefined except for 1 lane that is being translated to the remaining
    // extraction lane. Therefore, it is a splat shuffle. Ex:
    // ShufMask = { undef, undef, 0, undef }
    // TODO: The cost model has an option for a "broadcast" shuffle
    //       (splat-from-element-0), but no option for a more general splat.
    NewCost +=
        TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy);

    // The more expensive extract will be replaced by a shuffle. If the costs
    // are equal and there is a preferred extract index, shuffle the opposite
    // operand. Otherwise, replace the extract with the higher index.
    if (Extract0Cost > Extract1Cost)
      ConvertToShuffle = Ext0;
    else if (Extract1Cost > Extract0Cost)
      ConvertToShuffle = Ext1;
    else if (PreferredExtractIndex == Ext0Index)
      ConvertToShuffle = Ext1;
    else if (PreferredExtractIndex == Ext1Index)
      ConvertToShuffle = Ext0;
    else
      ConvertToShuffle = Ext0Index > Ext1Index ? Ext0 : Ext1;
  }

  // Aggressively form a vector op if the cost is equal because the transform
  // may enable further optimization.
  // Codegen can reverse this transform (scalarize) if it was not profitable.
  return OldCost < NewCost;
}

/// Try to reduce extract element costs by converting scalar compares to vector
/// compares followed by extract.
/// cmp (ext0 V0, C), (ext1 V1, C)
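/// For example (illustrative IR):
///   %e0 = extractelement <4 x i32> %a, i32 1
///   %e1 = extractelement <4 x i32> %b, i32 1
///   %c  = icmp sgt i32 %e0, %e1
/// becomes:
///   %vc = icmp sgt <4 x i32> %a, %b
///   %c  = extractelement <4 x i1> %vc, i32 1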
static void foldExtExtCmp(Instruction *Ext0, Instruction *Ext1,
                          Instruction &I) {
  assert(isa<CmpInst>(&I) && "Expected a compare");

  // cmp Pred (extelt V0, C), (extelt V1, C) --> extelt (cmp Pred V0, V1), C
  ++NumVecCmp;
  IRBuilder<> Builder(&I);
  CmpInst::Predicate Pred = cast<CmpInst>(&I)->getPredicate();
  Value *V0 = Ext0->getOperand(0), *V1 = Ext1->getOperand(0);
  Value *VecCmp = Builder.CreateCmp(Pred, V0, V1);
  Value *Extract = Builder.CreateExtractElement(VecCmp, Ext0->getOperand(1));
  I.replaceAllUsesWith(Extract);
}

/// Try to reduce extract element costs by converting scalar binops to vector
/// binops followed by extract.
/// bo (ext0 V0, C), (ext1 V1, C)
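/// For example (illustrative IR):
///   %e0 = extractelement <4 x float> %a, i32 0
///   %e1 = extractelement <4 x float> %b, i32 0
///   %s  = fadd float %e0, %e1
/// becomes:
///   %v = fadd <4 x float> %a, %b
///   %s = extractelement <4 x float> %v, i32 0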
static void foldExtExtBinop(Instruction *Ext0, Instruction *Ext1,
                            Instruction &I) {
  assert(isa<BinaryOperator>(&I) && "Expected a binary operator");

  // bo (extelt V0, C), (extelt V1, C) --> extelt (bo V0, V1), C
  ++NumVecBO;
  IRBuilder<> Builder(&I);
  Value *V0 = Ext0->getOperand(0), *V1 = Ext1->getOperand(0);
  Value *VecBO =
      Builder.CreateBinOp(cast<BinaryOperator>(&I)->getOpcode(), V0, V1);

  // All IR flags are safe to back-propagate because any potential poison
  // created in unused vector elements is discarded by the extract.
  if (auto *VecBOInst = dyn_cast<Instruction>(VecBO))
    VecBOInst->copyIRFlags(&I);

  Value *Extract = Builder.CreateExtractElement(VecBO, Ext0->getOperand(1));
  I.replaceAllUsesWith(Extract);
}

/// Match an instruction with extracted vector operands.
static bool foldExtractExtract(Instruction &I, const TargetTransformInfo &TTI) {
  // It is not safe to transform things like div, urem, etc. because we may
  // create undefined behavior when executing those on unknown vector elements.
  if (!isSafeToSpeculativelyExecute(&I))
    return false;

  Instruction *Ext0, *Ext1;
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!match(&I, m_Cmp(Pred, m_Instruction(Ext0), m_Instruction(Ext1))) &&
      !match(&I, m_BinOp(m_Instruction(Ext0), m_Instruction(Ext1))))
    return false;

  Value *V0, *V1;
  uint64_t C0, C1;
  if (!match(Ext0, m_ExtractElt(m_Value(V0), m_ConstantInt(C0))) ||
      !match(Ext1, m_ExtractElt(m_Value(V1), m_ConstantInt(C1))) ||
      V0->getType() != V1->getType())
    return false;

  // If the scalar value 'I' is going to be re-inserted into a vector, then try
  // to create an extract to that same element. The extract/insert can be
  // reduced to a "select shuffle".
  // TODO: If we add a larger pattern match that starts from an insert, this
  //       probably becomes unnecessary.
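  // Illustrative IR for the pattern being lined up with (names are made up):
  //   %s = add i32 %e0, %e1
  //   %v = insertelement <4 x i32> %vec, i32 %s, i32 2
  // Preferring lane 2 for the new extract lets a later fold turn the
  // extract/insert pair into a select shuffle.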
  uint64_t InsertIndex = std::numeric_limits<uint64_t>::max();
  if (I.hasOneUse())
    match(I.user_back(),
          m_InsertElt(m_Value(), m_Value(), m_ConstantInt(InsertIndex)));

  Instruction *ConvertToShuffle;
  if (isExtractExtractCheap(Ext0, Ext1, I.getOpcode(), TTI, ConvertToShuffle,
                            InsertIndex))
    return false;

  if (ConvertToShuffle) {
    // The shuffle mask is undefined except for 1 lane that is being translated
    // to the cheap extraction lane. Example:
    // ShufMask = { 2, undef, undef, undef }
    uint64_t SplatIndex = ConvertToShuffle == Ext0 ? C0 : C1;
    uint64_t CheapExtIndex = ConvertToShuffle == Ext0 ? C1 : C0;
    auto *VecTy = cast<VectorType>(V0->getType());
    SmallVector<int, 32> ShufMask(VecTy->getNumElements(), -1);
    ShufMask[CheapExtIndex] = SplatIndex;
    IRBuilder<> Builder(ConvertToShuffle);

    // extelt X, C --> extelt (splat X), C'
    Value *Shuf = Builder.CreateShuffleVector(ConvertToShuffle->getOperand(0),
                                              UndefValue::get(VecTy), ShufMask);
    Value *NewExt = Builder.CreateExtractElement(Shuf, CheapExtIndex);
    if (ConvertToShuffle == Ext0)
      Ext0 = cast<Instruction>(NewExt);
    else
      Ext1 = cast<Instruction>(NewExt);
  }

  if (Pred != CmpInst::BAD_ICMP_PREDICATE)
    foldExtExtCmp(Ext0, Ext1, I);
  else
    foldExtExtBinop(Ext0, Ext1, I);

  return true;
}

/// If this is a bitcast of a shuffle, try to bitcast the source vector to the
/// destination type followed by shuffle. This can enable further transforms by
/// moving bitcasts or shuffles together.
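/// For example (illustrative IR):
///   %s = shufflevector <2 x i64> %v, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
///   %b = bitcast <2 x i64> %s to <4 x i32>
/// becomes:
///   %c = bitcast <2 x i64> %v to <4 x i32>
///   %b = shufflevector <4 x i32> %c, <4 x i32> undef,
///                      <4 x i32> <i32 2, i32 3, i32 0, i32 1>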
static bool foldBitcastShuf(Instruction &I, const TargetTransformInfo &TTI) {
  Value *V;
  ArrayRef<int> Mask;
  if (!match(&I, m_BitCast(
                     m_OneUse(m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))))))
    return false;

  // Disallow non-vector casts and length-changing shuffles.
  // TODO: We could allow any shuffle.
  auto *DestTy = dyn_cast<VectorType>(I.getType());
  auto *SrcTy = cast<VectorType>(V->getType());
  if (!DestTy || I.getOperand(0)->getType() != SrcTy)
    return false;

  // The new shuffle must not cost more than the old shuffle. The bitcast is
  // moved ahead of the shuffle, so assume that it has the same cost as before.
  if (TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, DestTy) >
      TTI.getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, SrcTy))
    return false;

  unsigned DestNumElts = DestTy->getNumElements();
  unsigned SrcNumElts = SrcTy->getNumElements();
  SmallVector<int, 16> NewMask;
  if (SrcNumElts <= DestNumElts) {
    // The bitcast is from wide to narrow/equal elements. The shuffle mask can
    // always be expanded to the equivalent form choosing narrower elements.
    assert(DestNumElts % SrcNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = DestNumElts / SrcNumElts;
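    // E.g. (illustrative) bitcasting <2 x i64> to <4 x i32>, ScaleFactor = 2:
    // Mask <1, 0> expands to NewMask <2, 3, 0, 1>.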
    narrowShuffleMaskElts(ScaleFactor, Mask, NewMask);
  } else {
    // The bitcast is from narrow elements to wide elements. The shuffle mask
    // must choose consecutive elements to allow casting first.
    assert(SrcNumElts % DestNumElts == 0 && "Unexpected shuffle mask");
    unsigned ScaleFactor = SrcNumElts / DestNumElts;
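    // E.g. (illustrative) bitcasting <4 x i32> to <2 x i64>, ScaleFactor = 2:
    // Mask <2, 3, 0, 1> widens to NewMask <1, 0>, but <1, 1, 2, 3> cannot be
    // widened because the first pair does not select consecutive elements.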
    if (!widenShuffleMaskElts(ScaleFactor, Mask, NewMask))
      return false;
  }
  // bitcast (shuf V, MaskC) --> shuf (bitcast V), MaskC'
  ++NumShufOfBitcast;
  IRBuilder<> Builder(&I);
  Value *CastV = Builder.CreateBitCast(V, DestTy);
  Value *Shuf =
      Builder.CreateShuffleVector(CastV, UndefValue::get(DestTy), NewMask);
  I.replaceAllUsesWith(Shuf);
  return true;
}

/// Match a vector binop or compare instruction with at least one inserted
/// scalar operand and convert to scalar binop/cmp followed by insertelement.
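/// For example (illustrative IR):
///   %i = insertelement <4 x i32> <i32 7, i32 7, i32 7, i32 7>, i32 %x, i32 0
///   %r = add <4 x i32> %i, <i32 1, i32 2, i32 3, i32 4>
/// becomes:
///   %s = add i32 %x, 1
///   %r = insertelement <4 x i32> <i32 8, i32 9, i32 10, i32 11>, i32 %s, i32 0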
static bool scalarizeBinopOrCmp(Instruction &I,
                                const TargetTransformInfo &TTI) {
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  Value *Ins0, *Ins1;
  if (!match(&I, m_BinOp(m_Value(Ins0), m_Value(Ins1))) &&
      !match(&I, m_Cmp(Pred, m_Value(Ins0), m_Value(Ins1))))
    return false;

  // Do not convert the vector condition of a vector select into a scalar
  // condition. That may cause problems for codegen because of differences in
  // boolean formats and register-file transfers.
  // TODO: Can we account for that in the cost model?
  bool IsCmp = Pred != CmpInst::Predicate::BAD_ICMP_PREDICATE;
  if (IsCmp)
    for (User *U : I.users())
      if (match(U, m_Select(m_Specific(&I), m_Value(), m_Value())))
        return false;

  // Match against one or both scalar values being inserted into constant
  // vectors:
  // vec_op VecC0, (inselt VecC1, V1, Index)
  // vec_op (inselt VecC0, V0, Index), VecC1
  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index)
  // TODO: Deal with mismatched index constants and variable indexes?
  Constant *VecC0 = nullptr, *VecC1 = nullptr;
  Value *V0 = nullptr, *V1 = nullptr;
  uint64_t Index0 = 0, Index1 = 0;
  if (!match(Ins0, m_InsertElt(m_Constant(VecC0), m_Value(V0),
                               m_ConstantInt(Index0))) &&
      !match(Ins0, m_Constant(VecC0)))
    return false;
  if (!match(Ins1, m_InsertElt(m_Constant(VecC1), m_Value(V1),
                               m_ConstantInt(Index1))) &&
      !match(Ins1, m_Constant(VecC1)))
    return false;

  bool IsConst0 = !V0;
  bool IsConst1 = !V1;
  if (IsConst0 && IsConst1)
    return false;
  if (!IsConst0 && !IsConst1 && Index0 != Index1)
    return false;

  // Bail for single insertion if it is a load.
  // TODO: Handle this once getVectorInstrCost can cost for load/stores.
  auto *I0 = dyn_cast_or_null<Instruction>(V0);
  auto *I1 = dyn_cast_or_null<Instruction>(V1);
  if ((IsConst0 && I1 && I1->mayReadFromMemory()) ||
      (IsConst1 && I0 && I0->mayReadFromMemory()))
    return false;

  uint64_t Index = IsConst0 ? Index1 : Index0;
  Type *ScalarTy = IsConst0 ? V1->getType() : V0->getType();
  Type *VecTy = I.getType();
  assert(VecTy->isVectorTy() &&
         (IsConst0 || IsConst1 || V0->getType() == V1->getType()) &&
         (ScalarTy->isIntegerTy() || ScalarTy->isFloatingPointTy()) &&
         "Unexpected types for insert into binop");

  unsigned Opcode = I.getOpcode();
  int ScalarOpCost, VectorOpCost;
  if (IsCmp) {
    ScalarOpCost = TTI.getCmpSelInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getCmpSelInstrCost(Opcode, VecTy);
  } else {
    ScalarOpCost = TTI.getArithmeticInstrCost(Opcode, ScalarTy);
    VectorOpCost = TTI.getArithmeticInstrCost(Opcode, VecTy);
  }

  // Get cost estimate for the insert element. This cost will factor into
  // both sequences.
  int InsertCost =
      TTI.getVectorInstrCost(Instruction::InsertElement, VecTy, Index);
  int OldCost = (IsConst0 ? 0 : InsertCost) + (IsConst1 ? 0 : InsertCost) +
                VectorOpCost;
  int NewCost = ScalarOpCost + InsertCost +
                (IsConst0 ? 0 : !Ins0->hasOneUse() * InsertCost) +
                (IsConst1 ? 0 : !Ins1->hasOneUse() * InsertCost);
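  // Worked example with illustrative (target-dependent) costs: if all costs
  // are 1, Ins0 is a one-use insert, and Ins1 is a constant vector, then
  // OldCost = 1 + 0 + 1 = 2 and NewCost = 1 + 1 = 2, so the transform proceeds
  // because ties favor scalarization.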

  // We want to scalarize unless the vector variant actually has lower cost.
  if (OldCost < NewCost)
    return false;

  // vec_op (inselt VecC0, V0, Index), (inselt VecC1, V1, Index) -->
  // inselt NewVecC, (scalar_op V0, V1), Index
  if (IsCmp)
    ++NumScalarCmp;
  else
    ++NumScalarBO;

  // For constant cases, extract the scalar element; this should constant fold.
  IRBuilder<> Builder(&I);
  if (IsConst0)
    V0 = ConstantExpr::getExtractElement(VecC0, Builder.getInt64(Index));
  if (IsConst1)
    V1 = ConstantExpr::getExtractElement(VecC1, Builder.getInt64(Index));

  Value *Scalar =
      IsCmp ? Builder.CreateCmp(Pred, V0, V1)
            : Builder.CreateBinOp((Instruction::BinaryOps)Opcode, V0, V1);

  Scalar->setName(I.getName() + ".scalar");

  // All IR flags are safe to back-propagate. There is no potential for extra
  // poison to be created by the scalar instruction.
  if (auto *ScalarInst = dyn_cast<Instruction>(Scalar))
    ScalarInst->copyIRFlags(&I);

  // Fold the vector constants in the original vectors into a new base vector.
  Constant *NewVecC = IsCmp ? ConstantExpr::getCompare(Pred, VecC0, VecC1)
                            : ConstantExpr::get(Opcode, VecC0, VecC1);
  Value *Insert = Builder.CreateInsertElement(NewVecC, Scalar, Index);
  I.replaceAllUsesWith(Insert);
  Insert->takeName(&I);
  return true;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
static bool runImpl(Function &F, const TargetTransformInfo &TTI,
                    const DominatorTree &DT) {
  if (DisableVectorCombine)
    return false;

  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Do not delete instructions under here and invalidate the iterator.
    // Walk the block forwards to enable simple iterative chains of transforms.
    // TODO: It could be more efficient to remove dead instructions
    //       iteratively in this loop rather than waiting until the end.
    for (Instruction &I : BB) {
      if (isa<DbgInfoIntrinsic>(I))
        continue;
      MadeChange |= foldExtractExtract(I, TTI);
      MadeChange |= foldBitcastShuf(I, TTI);
      MadeChange |= scalarizeBinopOrCmp(I, TTI);
    }
  }

  // We're done with transforms, so remove dead instructions.
  if (MadeChange)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB);

  return MadeChange;
}

// Pass manager boilerplate below here.

namespace {
class VectorCombineLegacyPass : public FunctionPass {
public:
  static char ID;
  VectorCombineLegacyPass() : FunctionPass(ID) {
    initializeVectorCombineLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    FunctionPass::getAnalysisUsage(AU);
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    return runImpl(F, TTI, DT);
  }
};
} // namespace

char VectorCombineLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(VectorCombineLegacyPass, "vector-combine",
                      "Optimize scalar/vector ops", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(VectorCombineLegacyPass, "vector-combine",
                    "Optimize scalar/vector ops", false, false)

Pass *llvm::createVectorCombinePass() {
  return new VectorCombineLegacyPass();
}

PreservedAnalyses VectorCombinePass::run(Function &F,
                                         FunctionAnalysisManager &FAM) {
  TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
  DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  if (!runImpl(F, TTI, DT))
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<AAManager>();
  PA.preserve<BasicAA>();
  return PA;
}