//===----- CodeGen/ExpandVectorPredication.cpp - Expand VP intrinsics -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements IR expansion for vector predication intrinsics, allowing
// targets to enable vector predication until just before codegen.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ExpandVectorPredication.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

using VPLegalization = TargetTransformInfo::VPLegalization;
using VPTransform = TargetTransformInfo::VPLegalization::VPTransform;

// Keep this in sync with TargetTransformInfo::VPLegalization.
#define VPINTERNAL_VPLEGAL_CASES                                               \
  VPINTERNAL_CASE(Legal)                                                       \
  VPINTERNAL_CASE(Discard)                                                     \
  VPINTERNAL_CASE(Convert)

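// Stringify each legalization kind so that VPINTERNAL_VPLEGAL_CASES expands
// to "|Legal|Discard|Convert" in the cl::opt descriptions below.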
#define VPINTERNAL_CASE(X) "|" #X

// Override options.
static cl::opt<std::string> EVLTransformOverride(
    "expandvp-override-evl-transform", cl::init(""), cl::Hidden,
    cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
             ". If non-empty, ignore "
             "TargetTransformInfo and "
             "always use this transformation for the %evl parameter (Used in "
             "testing)."));

static cl::opt<std::string> MaskTransformOverride(
    "expandvp-override-mask-transform", cl::init(""), cl::Hidden,
    cl::desc("Options: <empty>" VPINTERNAL_VPLEGAL_CASES
             ". If non-empty, ignore "
             "TargetTransformInfo and "
             "always use this transformation for the %mask parameter (Used in "
             "testing)."));

#undef VPINTERNAL_CASE
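// Redefine the case macro so the same list now expands into StringSwitch
// cases mapping each option string back to its VPLegalization kind.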
#define VPINTERNAL_CASE(X) .Case(#X, VPLegalization::X)

static VPTransform parseOverrideOption(const std::string &TextOpt) {
  return StringSwitch<VPTransform>(TextOpt) VPINTERNAL_VPLEGAL_CASES;
}

#undef VPINTERNAL_VPLEGAL_CASES

// Whether any override options are set.
static bool anyExpandVPOverridesSet() {
  return !EVLTransformOverride.empty() || !MaskTransformOverride.empty();
}

#define DEBUG_TYPE "expandvp"

STATISTIC(NumFoldedVL, "Number of folded vector length params");
STATISTIC(NumLoweredVPOps, "Number of lowered vector predication operations");

//// Helpers {

/// \returns Whether the vector mask \p MaskVal has all lane bits set.
static bool isAllTrueMask(Value *MaskVal) {
  auto *ConstVec = dyn_cast<ConstantVector>(MaskVal);
  return ConstVec && ConstVec->isAllOnesValue();
}

/// \returns A non-excepting divisor constant for this type.
static Constant *getSafeDivisor(Type *DivTy) {
  assert(DivTy->isIntOrIntVectorTy() && "Unsupported divisor type");
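  // A divisor of 1 can never trap: there is no division by zero, and signed
  // overflow only occurs for INT_MIN / -1.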
  return ConstantInt::get(DivTy, 1u, false);
}

/// Transfer operation properties from \p VPI to \p NewVal.
static void transferDecorations(Value &NewVal, VPIntrinsic &VPI) {
  auto *NewInst = dyn_cast<Instruction>(&NewVal);
  if (!NewInst || !isa<FPMathOperator>(NewVal))
    return;

  auto *OldFMOp = dyn_cast<FPMathOperator>(&VPI);
  if (!OldFMOp)
    return;

  NewInst->setFastMathFlags(OldFMOp->getFastMathFlags());
}

/// Transfer all properties from \p OldOp to \p NewOp and replace all uses.
/// \p OldOp is erased.
static void replaceOperation(Value &NewOp, VPIntrinsic &OldOp) {
  transferDecorations(NewOp, OldOp);
  OldOp.replaceAllUsesWith(&NewOp);
  OldOp.eraseFromParent();
}

//// } Helpers

namespace {

// Expansion pass state at function scope.
struct CachingVPExpander {
  Function &F;
  const TargetTransformInfo &TTI;

  /// \returns A (fixed length) vector with ascending integer indices
  /// (<0, 1, ..., NumElems-1>).
  /// \p Builder
  ///    Used for instruction creation.
  /// \p LaneTy
  ///    Integer element type of the result vector.
  /// \p NumElems
  ///    Number of vector elements.
  Value *createStepVector(IRBuilder<> &Builder, Type *LaneTy,
                          unsigned NumElems);

  /// \returns A bitmask that is true where the lane position is less-than \p
  /// EVLParam.
  ///
  /// \p Builder
  ///    Used for instruction creation.
  /// \p EVLParam
  ///    The explicit vector length parameter to test against the lane
  ///    positions.
  /// \p ElemCount
  ///    Static (potentially scalable) number of vector elements.
  Value *convertEVLToMask(IRBuilder<> &Builder, Value *EVLParam,
                          ElementCount ElemCount);

  Value *foldEVLIntoMask(VPIntrinsic &VPI);

  /// "Remove" the %evl parameter of \p PI by setting it to the static vector
  /// length of the operation.
  void discardEVLParameter(VPIntrinsic &PI);

  /// \brief Lower this VP binary operator to an unpredicated binary operator.
  Value *expandPredicationInBinaryOperator(IRBuilder<> &Builder,
                                           VPIntrinsic &PI);

  /// \brief Lower this VP reduction to a call to an unpredicated reduction
  /// intrinsic.
  Value *expandPredicationInReduction(IRBuilder<> &Builder,
                                      VPReductionIntrinsic &PI);

  /// \brief Query TTI and expand the vector predication in \p PI accordingly.
  Value *expandPredication(VPIntrinsic &PI);

  /// \brief Determine how and whether the VPIntrinsic \p VPI shall be
  /// expanded. This overrides TTI with the cl::opts listed at the top of this
  /// file.
  VPLegalization getVPLegalizationStrategy(const VPIntrinsic &VPI) const;
  bool UsingTTIOverrides;

public:
  CachingVPExpander(Function &F, const TargetTransformInfo &TTI)
      : F(F), TTI(TTI), UsingTTIOverrides(anyExpandVPOverridesSet()) {}

  bool expandVectorPredication();
};

//// CachingVPExpander {

Value *CachingVPExpander::createStepVector(IRBuilder<> &Builder, Type *LaneTy,
                                           unsigned NumElems) {
  // TODO add caching
  SmallVector<Constant *, 16> ConstElems;

  for (unsigned Idx = 0; Idx < NumElems; ++Idx)
    ConstElems.push_back(ConstantInt::get(LaneTy, Idx, false));

  return ConstantVector::get(ConstElems);
}

Value *CachingVPExpander::convertEVLToMask(IRBuilder<> &Builder,
                                           Value *EVLParam,
                                           ElementCount ElemCount) {
  // TODO add caching
  // Scalable vector %evl conversion.
  if (ElemCount.isScalable()) {
    auto *M = Builder.GetInsertBlock()->getModule();
    Type *BoolVecTy = VectorType::get(Builder.getInt1Ty(), ElemCount);
    Function *ActiveMaskFunc = Intrinsic::getDeclaration(
        M, Intrinsic::get_active_lane_mask, {BoolVecTy, EVLParam->getType()});
    // `get_active_lane_mask` performs an implicit less-than comparison.
    Value *ConstZero = Builder.getInt32(0);
    return Builder.CreateCall(ActiveMaskFunc, {ConstZero, EVLParam});
  }

  // Fixed vector %evl conversion.
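  //   %mask = icmp ult <0, 1, ..., N-1>, splat(%evl)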
  Type *LaneTy = EVLParam->getType();
  unsigned NumElems = ElemCount.getFixedValue();
  Value *VLSplat = Builder.CreateVectorSplat(NumElems, EVLParam);
  Value *IdxVec = createStepVector(Builder, LaneTy, NumElems);
  return Builder.CreateICmp(CmpInst::ICMP_ULT, IdxVec, VLSplat);
}

Value *
CachingVPExpander::expandPredicationInBinaryOperator(IRBuilder<> &Builder,
                                                     VPIntrinsic &VPI) {
  assert((isSafeToSpeculativelyExecute(&VPI) ||
          VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  auto OC = static_cast<Instruction::BinaryOps>(*VPI.getFunctionalOpcode());
  assert(Instruction::isBinaryOp(OC));

  Value *Op0 = VPI.getOperand(0);
  Value *Op1 = VPI.getOperand(1);
  Value *Mask = VPI.getMaskParam();

  // Blend in safe operands.
  if (Mask && !isAllTrueMask(Mask)) {
    switch (OC) {
    default:
      // Can safely ignore the predicate.
      break;

    // Division operators need a safe divisor on masked-off lanes (1).
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      // 2nd operand must not be zero.
      Value *SafeDivisor = getSafeDivisor(VPI.getType());
      Op1 = Builder.CreateSelect(Mask, Op1, SafeDivisor);
    }
  }

  Value *NewBinOp = Builder.CreateBinOp(OC, Op0, Op1, VPI.getName());

  replaceOperation(*NewBinOp, VPI);
  return NewBinOp;
}

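/// \returns The neutral element of the reduction \p VPI for element type
/// \p EltTy, i.e. the value that leaves the reduction result unchanged when
/// it occupies a masked-off lane.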
static Value *getNeutralReductionElement(const VPReductionIntrinsic &VPI,
                                         Type *EltTy) {
  bool Negative = false;
  unsigned EltBits = EltTy->getScalarSizeInBits();
  switch (VPI.getIntrinsicID()) {
  default:
    llvm_unreachable("Expecting a VP reduction intrinsic");
  case Intrinsic::vp_reduce_add:
  case Intrinsic::vp_reduce_or:
  case Intrinsic::vp_reduce_xor:
  case Intrinsic::vp_reduce_umax:
    return Constant::getNullValue(EltTy);
  case Intrinsic::vp_reduce_mul:
    return ConstantInt::get(EltTy, 1, /*IsSigned*/ false);
  case Intrinsic::vp_reduce_and:
  case Intrinsic::vp_reduce_umin:
    return ConstantInt::getAllOnesValue(EltTy);
  case Intrinsic::vp_reduce_smin:
    return ConstantInt::get(EltTy->getContext(),
                            APInt::getSignedMaxValue(EltBits));
  case Intrinsic::vp_reduce_smax:
    return ConstantInt::get(EltTy->getContext(),
                            APInt::getSignedMinValue(EltBits));
  case Intrinsic::vp_reduce_fmax:
    Negative = true;
    LLVM_FALLTHROUGH;
  case Intrinsic::vp_reduce_fmin: {
    FastMathFlags Flags = VPI.getFastMathFlags();
    const fltSemantics &Semantics = EltTy->getFltSemantics();
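    // Pick the weakest neutral value the fast-math flags still allow: NaN if
    // NaNs may occur, otherwise infinity, otherwise the largest finite value.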
    return !Flags.noNaNs() ? ConstantFP::getQNaN(EltTy, Negative)
           : !Flags.noInfs()
               ? ConstantFP::getInfinity(EltTy, Negative)
               : ConstantFP::get(EltTy,
                                 APFloat::getLargest(Semantics, Negative));
  }
  case Intrinsic::vp_reduce_fadd:
    return ConstantFP::getNegativeZero(EltTy);
  case Intrinsic::vp_reduce_fmul:
    return ConstantFP::get(EltTy, 1.0);
  }
}

Value *
CachingVPExpander::expandPredicationInReduction(IRBuilder<> &Builder,
                                                VPReductionIntrinsic &VPI) {
  assert((isSafeToSpeculativelyExecute(&VPI) ||
          VPI.canIgnoreVectorLengthParam()) &&
         "Implicitly dropping %evl in non-speculatable operator!");

  Value *Mask = VPI.getMaskParam();
  Value *RedOp = VPI.getOperand(VPI.getVectorParamPos());

  // Insert the neutral element in masked-out positions.
  if (Mask && !isAllTrueMask(Mask)) {
    auto *NeutralElt = getNeutralReductionElement(VPI, VPI.getType());
    auto *NeutralVector = Builder.CreateVectorSplat(
        cast<VectorType>(RedOp->getType())->getElementCount(), NeutralElt);
    RedOp = Builder.CreateSelect(Mask, RedOp, NeutralVector);
  }

  Value *Reduction;
  Value *Start = VPI.getOperand(VPI.getStartParamPos());

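  // Except for the ordered fadd/fmul reductions, the unpredicated reduction
  // intrinsics take no start value; fold it in with a scalar operation after
  // the reduction.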
  switch (VPI.getIntrinsicID()) {
  default:
    llvm_unreachable("Impossible reduction kind");
  case Intrinsic::vp_reduce_add:
    Reduction = Builder.CreateAddReduce(RedOp);
    Reduction = Builder.CreateAdd(Reduction, Start);
    break;
  case Intrinsic::vp_reduce_mul:
    Reduction = Builder.CreateMulReduce(RedOp);
    Reduction = Builder.CreateMul(Reduction, Start);
    break;
  case Intrinsic::vp_reduce_and:
    Reduction = Builder.CreateAndReduce(RedOp);
    Reduction = Builder.CreateAnd(Reduction, Start);
    break;
  case Intrinsic::vp_reduce_or:
    Reduction = Builder.CreateOrReduce(RedOp);
    Reduction = Builder.CreateOr(Reduction, Start);
    break;
  case Intrinsic::vp_reduce_xor:
    Reduction = Builder.CreateXorReduce(RedOp);
    Reduction = Builder.CreateXor(Reduction, Start);
    break;
  case Intrinsic::vp_reduce_smax:
    Reduction = Builder.CreateIntMaxReduce(RedOp, /*IsSigned*/ true);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::smax, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_smin:
    Reduction = Builder.CreateIntMinReduce(RedOp, /*IsSigned*/ true);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::smin, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_umax:
    Reduction = Builder.CreateIntMaxReduce(RedOp, /*IsSigned*/ false);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::umax, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_umin:
    Reduction = Builder.CreateIntMinReduce(RedOp, /*IsSigned*/ false);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::umin, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_fmax:
    Reduction = Builder.CreateFPMaxReduce(RedOp);
    transferDecorations(*Reduction, VPI);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_fmin:
    Reduction = Builder.CreateFPMinReduce(RedOp);
    transferDecorations(*Reduction, VPI);
    Reduction =
        Builder.CreateBinaryIntrinsic(Intrinsic::minnum, Reduction, Start);
    break;
  case Intrinsic::vp_reduce_fadd:
    Reduction = Builder.CreateFAddReduce(Start, RedOp);
    break;
  case Intrinsic::vp_reduce_fmul:
    Reduction = Builder.CreateFMulReduce(Start, RedOp);
    break;
  }

  replaceOperation(*Reduction, VPI);
  return Reduction;
}

void CachingVPExpander::discardEVLParameter(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Discard EVL parameter in " << VPI << "\n");

  if (VPI.canIgnoreVectorLengthParam())
    return;

  Value *EVLParam = VPI.getVectorLengthParam();
  if (!EVLParam)
    return;

  ElementCount StaticElemCount = VPI.getStaticVectorLength();
  Value *MaxEVL = nullptr;
  Type *Int32Ty = Type::getInt32Ty(VPI.getContext());
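  // For scalable vectors, the static vector length is vscale * MinElems;
  // materialize it with the llvm.vscale intrinsic.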
  if (StaticElemCount.isScalable()) {
    // TODO add caching
    auto *M = VPI.getModule();
    Function *VScaleFunc =
        Intrinsic::getDeclaration(M, Intrinsic::vscale, Int32Ty);
    IRBuilder<> Builder(VPI.getParent(), VPI.getIterator());
    Value *FactorConst = Builder.getInt32(StaticElemCount.getKnownMinValue());
    Value *VScale = Builder.CreateCall(VScaleFunc, {}, "vscale");
    MaxEVL = Builder.CreateMul(VScale, FactorConst, "scalable_size",
                               /*NUW*/ true, /*NSW*/ false);
  } else {
    MaxEVL = ConstantInt::get(Int32Ty, StaticElemCount.getFixedValue(), false);
  }
  VPI.setVectorLengthParam(MaxEVL);
}

Value *CachingVPExpander::foldEVLIntoMask(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Folding vlen for " << VPI << '\n');

  IRBuilder<> Builder(&VPI);

  // The %evl parameter is ineffective, so there is nothing to do here.
  if (VPI.canIgnoreVectorLengthParam())
    return &VPI;

  // Only VP intrinsics can have an %evl parameter.
  Value *OldMaskParam = VPI.getMaskParam();
  Value *OldEVLParam = VPI.getVectorLengthParam();
  assert(OldMaskParam && "no mask param to fold the vl param into");
  assert(OldEVLParam && "no EVL param to fold away");

  LLVM_DEBUG(dbgs() << "OLD evl: " << *OldEVLParam << '\n');
  LLVM_DEBUG(dbgs() << "OLD mask: " << *OldMaskParam << '\n');

  // Convert the %evl predication into vector mask predication.
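  //   %vl_mask  = (lane index < %evl)
  //   %new_mask = and %vl_mask, %old_mask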
  ElementCount ElemCount = VPI.getStaticVectorLength();
  Value *VLMask = convertEVLToMask(Builder, OldEVLParam, ElemCount);
  Value *NewMaskParam = Builder.CreateAnd(VLMask, OldMaskParam);
  VPI.setMaskParam(NewMaskParam);

  // Drop the %evl parameter.
  discardEVLParameter(VPI);
  assert(VPI.canIgnoreVectorLengthParam() &&
         "transformation did not render the evl param ineffective!");

  // Reassess the modified instruction.
  return &VPI;
}

Value *CachingVPExpander::expandPredication(VPIntrinsic &VPI) {
  LLVM_DEBUG(dbgs() << "Lowering to unpredicated op: " << VPI << '\n');

  IRBuilder<> Builder(&VPI);

  // Try lowering to an LLVM instruction first.
  auto OC = VPI.getFunctionalOpcode();

  if (OC && Instruction::isBinaryOp(*OC))
    return expandPredicationInBinaryOperator(Builder, VPI);

  if (auto *VPRI = dyn_cast<VPReductionIntrinsic>(&VPI))
    return expandPredicationInReduction(Builder, *VPRI);

  return &VPI;
}

//// } CachingVPExpander

struct TransformJob {
  VPIntrinsic *PI;
  TargetTransformInfo::VPLegalization Strategy;
  TransformJob(VPIntrinsic *PI, TargetTransformInfo::VPLegalization InitStrat)
      : PI(PI), Strategy(InitStrat) {}

  bool isDone() const { return Strategy.shouldDoNothing(); }
};

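/// Adjust \p LegalizeStrat for the instruction \p I: avoid a redundant %evl
/// fold for speculatable instructions and never drop the predicating effect
/// of %evl for non-speculatable ones.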
void sanitizeStrategy(Instruction &I, VPLegalization &LegalizeStrat) {
  // Speculatable instructions do not strictly need predication.
  if (isSafeToSpeculativelyExecute(&I)) {
    // Converting a speculatable VP intrinsic means dropping %mask and %evl.
    // No need to expand %evl into the %mask only to ignore that code.
    if (LegalizeStrat.OpStrategy == VPLegalization::Convert)
      LegalizeStrat.EVLParamStrategy = VPLegalization::Discard;
    return;
  }

  // We have to preserve the predicating effect of %evl for this
  // non-speculatable VP intrinsic.
  // 1) Never discard %evl.
  // 2) If this VP intrinsic will be expanded to non-VP code, make sure that
  //    %evl gets folded into %mask.
  if ((LegalizeStrat.EVLParamStrategy == VPLegalization::Discard) ||
      (LegalizeStrat.OpStrategy == VPLegalization::Convert)) {
    LegalizeStrat.EVLParamStrategy = VPLegalization::Convert;
  }
}

VPLegalization
CachingVPExpander::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
  auto VPStrat = TTI.getVPLegalizationStrategy(VPI);
  if (LLVM_LIKELY(!UsingTTIOverrides)) {
    // No overrides - we are in production.
    return VPStrat;
  }

  // Overrides set - we are in testing, the following does not need to be
  // efficient.
  VPStrat.EVLParamStrategy = parseOverrideOption(EVLTransformOverride);
  VPStrat.OpStrategy = parseOverrideOption(MaskTransformOverride);
  return VPStrat;
}

/// \brief Expand llvm.vp.* intrinsics as requested by \p TTI.
bool CachingVPExpander::expandVectorPredication() {
  SmallVector<TransformJob, 16> Worklist;

  // Collect all VPIntrinsics that need expansion and determine their expansion
  // strategy.
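  // Expansion both erases and creates instructions, so gather the work up
  // front rather than transforming while iterating.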
  for (auto &I : instructions(F)) {
    auto *VPI = dyn_cast<VPIntrinsic>(&I);
    if (!VPI)
      continue;
    auto VPStrat = getVPLegalizationStrategy(*VPI);
    sanitizeStrategy(I, VPStrat);
    if (!VPStrat.shouldDoNothing())
      Worklist.emplace_back(VPI, VPStrat);
  }
  if (Worklist.empty())
    return false;

  // Transform all VPIntrinsics on the worklist.
  LLVM_DEBUG(dbgs() << "\n:::: Transforming " << Worklist.size()
                    << " instructions ::::\n");
  for (TransformJob Job : Worklist) {
    // Transform the EVL parameter.
    switch (Job.Strategy.EVLParamStrategy) {
    case VPLegalization::Legal:
      break;
    case VPLegalization::Discard:
      discardEVLParameter(*Job.PI);
      break;
    case VPLegalization::Convert:
      if (foldEVLIntoMask(*Job.PI))
        ++NumFoldedVL;
      break;
    }
    Job.Strategy.EVLParamStrategy = VPLegalization::Legal;

    // Replace with a non-predicated operation.
    switch (Job.Strategy.OpStrategy) {
    case VPLegalization::Legal:
      break;
    case VPLegalization::Discard:
      llvm_unreachable("Invalid strategy for operators.");
    case VPLegalization::Convert:
      expandPredication(*Job.PI);
      ++NumLoweredVPOps;
      break;
    }
    Job.Strategy.OpStrategy = VPLegalization::Legal;

    assert(Job.isDone() && "incomplete transformation");
  }

  return true;
}
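
/// The legacy pass: expands llvm.vp.* intrinsics using the
/// TargetTransformInfo supplied by TargetTransformInfoWrapperPass.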
class ExpandVectorPredication : public FunctionPass {
public:
  static char ID;
  ExpandVectorPredication() : FunctionPass(ID) {
    initializeExpandVectorPredicationPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    const auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    CachingVPExpander VPExpander(F, *TTI);
    return VPExpander.expandVectorPredication();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }
};
} // namespace

char ExpandVectorPredication::ID;
INITIALIZE_PASS_BEGIN(ExpandVectorPredication, "expandvp",
                      "Expand vector predication intrinsics", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(ExpandVectorPredication, "expandvp",
                    "Expand vector predication intrinsics", false, false)

FunctionPass *llvm::createExpandVectorPredicationPass() {
  return new ExpandVectorPredication();
}

PreservedAnalyses
ExpandVectorPredicationPass::run(Function &F, FunctionAnalysisManager &AM) {
  const auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  CachingVPExpander VPExpander(F, TTI);
  if (!VPExpander.expandVectorPredication())
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}