1 //===- MVETailPredication.cpp - MVE Tail Predication ------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Armv8.1m introduced MVE, M-Profile Vector Extension, and low-overhead
11 /// branches to help accelerate DSP applications. These two extensions,
12 /// combined with a new form of predication called tail-predication, can be used
13 /// to provide implicit vector predication within a low-overhead loop.
14 /// This is implicit because the predicate of active/inactive lanes is
15 /// calculated by hardware, and thus does not need to be explicitly passed
16 /// to vector instructions. The instructions responsible for this are the
/// DLSTP and WLSTP instructions, which set up a tail-predicated loop and the
/// total number of data elements processed by the loop. The loop-end
19 /// LETP instruction is responsible for decrementing and setting the remaining
20 /// elements to be processed and generating the mask of active lanes.
21 ///
22 /// The HardwareLoops pass inserts intrinsics identifying loops that the
23 /// backend will attempt to convert into a low-overhead loop. The vectorizer is
24 /// responsible for generating a vectorized loop in which the lanes are
25 /// predicated upon the iteration counter. This pass looks at these predicated
/// vector loops, that are targets for low-overhead loops, and prepares them
/// for
27 /// code generation. Once the vectorizer has produced a masked loop, there's a
28 /// couple of final forms:
29 /// - A tail-predicated loop, with implicit predication.
30 /// - A loop containing multiple VCPT instructions, predicating multiple VPT
31 ///   blocks of instructions operating on different vector types.
32 ///
33 /// This pass:
34 /// 1) Checks if the predicates of the masked load/store instructions are
///    generated by intrinsic @llvm.get.active.lane.mask(). This intrinsic
///    consumes
36 ///    the Backedge Taken Count (BTC) of the scalar loop as its second argument,
37 ///    which we extract to set up the number of elements processed by the loop.
/// 2) Intrinsic @llvm.get.active.lane.mask() is then replaced by the MVE
///    target
39 ///    specific VCTP intrinsic to represent the effect of tail predication.
40 ///    This will be picked up by the ARM Low-overhead loop pass, which performs
41 ///    the final transformation to a DLSTP or WLSTP tail-predicated loop.
42 
43 #include "ARM.h"
44 #include "ARMSubtarget.h"
45 #include "llvm/Analysis/LoopInfo.h"
46 #include "llvm/Analysis/LoopPass.h"
47 #include "llvm/Analysis/ScalarEvolution.h"
48 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
49 #include "llvm/Analysis/TargetLibraryInfo.h"
50 #include "llvm/Analysis/TargetTransformInfo.h"
51 #include "llvm/CodeGen/TargetPassConfig.h"
52 #include "llvm/IR/IRBuilder.h"
53 #include "llvm/IR/Instructions.h"
54 #include "llvm/IR/IntrinsicsARM.h"
55 #include "llvm/IR/PatternMatch.h"
56 #include "llvm/InitializePasses.h"
57 #include "llvm/Support/Debug.h"
58 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
59 #include "llvm/Transforms/Utils/LoopUtils.h"
60 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
61 
62 using namespace llvm;
63 
64 #define DEBUG_TYPE "mve-tail-predication"
65 #define DESC "Transform predicated vector loops to use MVE tail predication"
66 
// Testing/debugging knob: perform the tail-predication rewrite even when the
// overflow analysis in IsSafeActiveMask cannot prove it is safe.
static cl::opt<bool>
ForceTailPredication("force-mve-tail-predication", cl::Hidden, cl::init(false),
                     cl::desc("Force MVE tail-predication even if it might be "
                              "unsafe (e.g. possible overflow in loop "
                              "counters)"));

// Master switch for the pass; note it defaults to disabled (cl::init(true)).
// Deliberately not static — presumably referenced from other ARM backend
// files; confirm before changing linkage.
cl::opt<bool>
DisableTailPredication("disable-mve-tail-predication", cl::Hidden,
                       cl::init(true),
                       cl::desc("Disable MVE Tail Predication"));
77 namespace {
78 
/// Loop pass that replaces @llvm.get.active.lane.mask predicates in masked
/// vector loops with MVE VCTP intrinsics, preparing the loop for conversion
/// to a DLSTP/WLSTP tail-predicated hardware loop.
class MVETailPredication : public LoopPass {
  // Masked load/store intrinsics collected by IsPredicatedVectorLoop().
  SmallVector<IntrinsicInst*, 4> MaskedInsts;
  // Current loop and the analyses it needs; (re)initialized per loop in
  // runOnLoop().
  Loop *L = nullptr;
  LoopInfo *LI = nullptr;
  const DataLayout *DL;
  DominatorTree *DT = nullptr;
  ScalarEvolution *SE = nullptr;
  TargetTransformInfo *TTI = nullptr;
  TargetLibraryInfo *TLI = nullptr;
  // Set when Cleanup() clones a VCTP into the exit block; triggers
  // RematerializeIterCount() afterwards.
  bool ClonedVCTPInExitBlock = false;

public:
  static char ID;

  MVETailPredication() : LoopPass(ID) { }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.setPreservesCFG();
  }

  bool runOnLoop(Loop *L, LPPassManager&) override;

private:
  /// Perform the relevant checks on the loop and convert if possible.
  bool TryConvert(Value *TripCount);

  /// Return whether this is a vectorized loop, that contains masked
  /// load/stores.
  bool IsPredicatedVectorLoop();

  /// Perform checks on the arguments of @llvm.get.active.lane.mask
  /// intrinsic: check if the first is a loop induction variable, and for the
  /// second, check that no overflow can occur in the expressions that use
  /// this backedge-taken count.
  bool IsSafeActiveMask(IntrinsicInst *ActiveLaneMask, Value *TripCount,
                        FixedVectorType *VecTy);

  /// Insert the intrinsic to represent the effect of tail predication.
  void InsertVCTPIntrinsic(IntrinsicInst *ActiveLaneMask, Value *TripCount,
                           FixedVectorType *VecTy,
                           DenseMap<Instruction *, Instruction *> &NewPredicates);

  /// Rematerialize the iteration count in exit blocks, which enables
  /// ARMLowOverheadLoops to better optimise away loop update statements inside
  /// hardware-loops.
  void RematerializeIterCount();
};
133 
134 } // end namespace
135 
136 static bool IsDecrement(Instruction &I) {
137   auto *Call = dyn_cast<IntrinsicInst>(&I);
138   if (!Call)
139     return false;
140 
141   Intrinsic::ID ID = Call->getIntrinsicID();
142   return ID == Intrinsic::loop_decrement_reg;
143 }
144 
145 static bool IsMasked(Instruction *I) {
146   auto *Call = dyn_cast<IntrinsicInst>(I);
147   if (!Call)
148     return false;
149 
150   Intrinsic::ID ID = Call->getIntrinsicID();
151   // TODO: Support gather/scatter expand/compress operations.
152   return ID == Intrinsic::masked_store || ID == Intrinsic::masked_load;
153 }
154 
155 void MVETailPredication::RematerializeIterCount() {
156   SmallVector<WeakTrackingVH, 16> DeadInsts;
157   SCEVExpander Rewriter(*SE, *DL, "mvetp");
158   ReplaceExitVal ReplaceExitValue = AlwaysRepl;
159 
160   formLCSSARecursively(*L, *DT, LI, SE);
161   rewriteLoopExitValues(L, LI, TLI, SE, TTI, Rewriter, DT, ReplaceExitValue,
162                         DeadInsts);
163 }
164 
// Entry point: gather the analyses, locate the hardware-loop set-up and
// decrement intrinsics inserted by the HardwareLoops pass, and then attempt
// the tail-predication conversion via TryConvert(). Returns true if the loop
// was modified.
bool MVETailPredication::runOnLoop(Loop *L, LPPassManager&) {
  if (skipLoop(L) || DisableTailPredication)
    return false;

  MaskedInsts.clear();
  Function &F = *L->getHeader()->getParent();
  auto &TPC = getAnalysis<TargetPassConfig>();
  auto &TM = TPC.getTM<TargetMachine>();
  auto *ST = &TM.getSubtarget<ARMSubtarget>(F);
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  // TLI is optional (getAnalysisIfAvailable), so it may remain null.
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI(*L->getHeader()->getParent()) : nullptr;
  DL = &L->getHeader()->getModule()->getDataLayout();
  this->L = L;

  // The MVE and LOB extensions are combined to enable tail-predication, but
  // there's nothing preventing us from generating VCTP instructions for v8.1m.
  if (!ST->hasMVEIntegerOps() || !ST->hasV8_1MMainlineOps()) {
    LLVM_DEBUG(dbgs() << "ARM TP: Not a v8.1m.main+mve target.\n");
    return false;
  }

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader)
    return false;

  // Scan a block for the hardware-loop intrinsic that sets the iteration
  // count; returns null if none is found.
  auto FindLoopIterations = [](BasicBlock *BB) -> IntrinsicInst* {
    for (auto &I : *BB) {
      auto *Call = dyn_cast<IntrinsicInst>(&I);
      if (!Call)
        continue;

      Intrinsic::ID ID = Call->getIntrinsicID();
      if (ID == Intrinsic::set_loop_iterations ||
          ID == Intrinsic::test_set_loop_iterations)
        return cast<IntrinsicInst>(&I);
    }
    return nullptr;
  };

  // Look for the hardware loop intrinsic that sets the iteration count.
  IntrinsicInst *Setup = FindLoopIterations(Preheader);

  // The test.set iteration could live in the pre-preheader.
  if (!Setup) {
    if (!Preheader->getSinglePredecessor())
      return false;
    Setup = FindLoopIterations(Preheader->getSinglePredecessor());
    if (!Setup)
      return false;
  }

  // Search for the hardware loop intrinsic that decrements the loop counter.
  IntrinsicInst *Decrement = nullptr;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      if (IsDecrement(I)) {
        Decrement = cast<IntrinsicInst>(&I);
        break;
      }
    }
  }

  if (!Decrement)
    return false;

  ClonedVCTPInExitBlock = false;
  LLVM_DEBUG(dbgs() << "ARM TP: Running on Loop: " << *L << *Setup << "\n"
             << *Decrement << "\n");

  // The first operand of the set.loop.iterations intrinsic is the trip count.
  if (!TryConvert(Setup->getArgOperand(0))) {
    LLVM_DEBUG(dbgs() << "ARM TP: Can't tail-predicate this loop.\n");
    return false;
  }

  // If Cleanup() duplicated a VCTP into the exit block, re-expand the exit
  // values so ARMLowOverheadLoops can tidy up the counter updates.
  if (ClonedVCTPInExitBlock)
    RematerializeIterCount();
  return true;
}
247 
248 static FixedVectorType *getVectorType(IntrinsicInst *I) {
249   unsigned TypeOp = I->getIntrinsicID() == Intrinsic::masked_load ? 0 : 1;
250   auto *PtrTy = cast<PointerType>(I->getOperand(TypeOp)->getType());
251   auto *VecTy = cast<FixedVectorType>(PtrTy->getElementType());
252   assert(VecTy && "No scalable vectors expected here");
253   return VecTy;
254 }
255 
// Returns true if the loop is a vectorized loop containing at least one
// masked load/store whose lane configuration MVE can predicate, and a
// @llvm.get.active.lane.mask intrinsic. As a side effect, collects the masked
// load/store intrinsics into MaskedInsts for TryConvert() to inspect.
bool MVETailPredication::IsPredicatedVectorLoop() {
  // Check that the loop contains at least one masked load/store intrinsic.
  // We only support 'normal' vector instructions - other than masked
  // load/stores.
  bool ActiveLaneMask = false;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      auto *Int = dyn_cast<IntrinsicInst>(&I);
      if (!Int)
        continue;

      // Whitelisted intrinsics that may legitimately take vector operands;
      // get.active.lane.mask additionally records that predication is present.
      switch (Int->getIntrinsicID()) {
      case Intrinsic::get_active_lane_mask:
        ActiveLaneMask = true;
        LLVM_FALLTHROUGH;
      case Intrinsic::fma:
      case Intrinsic::sadd_sat:
      case Intrinsic::uadd_sat:
        continue;
      default:
        break;
      }

      if (IsMasked(&I)) {
        auto *VecTy = getVectorType(Int);
        unsigned Lanes = VecTy->getNumElements();
        unsigned ElementWidth = VecTy->getScalarSizeInBits();
        // MVE vectors are 128-bit, but don't support 128 x i1.
        // TODO: Can we support vectors larger than 128-bits?
        unsigned MaxWidth = TTI->getRegisterBitWidth(true);
        if (Lanes * ElementWidth > MaxWidth || Lanes == MaxWidth)
          return false;
        MaskedInsts.push_back(cast<IntrinsicInst>(&I));
        continue;
      }

      // Any other intrinsic taking a vector operand is unsupported.
      for (const Use &U : Int->args()) {
        if (isa<VectorType>(U->getType()))
          return false;
      }
    }
  }

  if (!ActiveLaneMask) {
    LLVM_DEBUG(dbgs() << "ARM TP: No get.active.lane.mask intrinsic found.\n");
    return false;
  }
  return !MaskedInsts.empty();
}
305 
// Look through the exit block to see whether there's a duplicate predicate
// instruction. This can happen when we need to perform a select on values
// from the last and previous iteration. Instead of doing a straight
// replacement of that predicate with the vctp, clone the vctp and place it
// in the block. This means that the VPR doesn't have to be live into the
// exit block which should make it easier to convert this loop into a proper
// tail predicated loop.
//
// Also deletes the now-dead predicate instructions (and their dead operand
// chains) collected in MaybeDead, plus any dead PHIs left in the loop.
// Returns true if a VCTP clone was placed in the exit block.
static bool Cleanup(DenseMap<Instruction*, Instruction*> &NewPredicates,
                    SetVector<Instruction*> &MaybeDead, Loop *L) {
  BasicBlock *Exit = L->getUniqueExitBlock();
  if (!Exit) {
    LLVM_DEBUG(dbgs() << "ARM TP: can't find loop exit block\n");
    return false;
  }

  bool ClonedVCTPInExitBlock = false;

  for (auto &Pair : NewPredicates) {
    Instruction *OldPred = Pair.first;
    Instruction *NewPred = Pair.second;

    // Find an exit-block instruction equivalent to the replaced predicate
    // and substitute a clone of the new predicate (the VCTP) for it.
    for (auto &I : *Exit) {
      if (I.isSameOperationAs(OldPred)) {
        Instruction *PredClone = NewPred->clone();
        PredClone->insertBefore(&I);
        I.replaceAllUsesWith(PredClone);
        MaybeDead.insert(&I);
        ClonedVCTPInExitBlock = true;
        LLVM_DEBUG(dbgs() << "ARM TP: replacing: "; I.dump();
                   dbgs() << "ARM TP: with:      "; PredClone->dump());
        break;
      }
    }
  }

  // Drop references and add operands to check for dead.
  // Worklist walk: anything in MaybeDead with no remaining uses is dead, and
  // its instruction operands become candidates in turn.
  SmallPtrSet<Instruction*, 4> Dead;
  while (!MaybeDead.empty()) {
    auto *I = MaybeDead.front();
    MaybeDead.remove(I);
    if (I->hasNUsesOrMore(1))
      continue;

    for (auto &U : I->operands())
      if (auto *OpI = dyn_cast<Instruction>(U))
        MaybeDead.insert(OpI);

    Dead.insert(I);
  }

  for (auto *I : Dead) {
    LLVM_DEBUG(dbgs() << "ARM TP: removing dead insn: "; I->dump());
    I->eraseFromParent();
  }

  for (auto I : L->blocks())
    DeleteDeadPHIs(I);

  return ClonedVCTPInExitBlock;
}
366 
// The active lane intrinsic has this form:
//
//    @llvm.get.active.lane.mask(IV, BTC)
//
// Here we perform checks that this intrinsic behaves as expected,
// which means:
//
// 1) The element count, which is calculated with BTC + 1, cannot overflow.
// 2) The element count needs to be sufficiently large that the decrement of
//    element counter doesn't overflow, which means that we need to prove:
//        ceil(ElementCount / VectorWidth) >= TripCount
//    by rounding ElementCount up:
//        (ElementCount + (VectorWidth - 1)) / VectorWidth
//    and evaluate if expression isKnownNonNegative:
//        ((ElementCount + (VectorWidth - 1)) / VectorWidth) - TripCount
// 3) The IV must be an induction phi with an increment equal to the
//    vector width.
bool MVETailPredication::IsSafeActiveMask(IntrinsicInst *ActiveLaneMask,
    Value *TripCount, FixedVectorType *VecTy) {
  // 1) Test whether entry to the loop is protected by a conditional
  // BTC + 1 < 0. In other words, if the scalar trip count overflows,
  // becomes negative, we shouldn't enter the loop and creating
  // tripcount expression BTC + 1 is not safe. So, check that BTC
  // isn't max. This is evaluated in unsigned, because the semantics
  // of @get.active.lane.mask is a ULE comparison.

  int VectorWidth = VecTy->getNumElements();
  auto *BackedgeTakenCount = ActiveLaneMask->getOperand(1);
  auto *BTC = SE->getSCEV(BackedgeTakenCount);

  if (!llvm::cannotBeMaxInLoop(BTC, L, *SE, false /*Signed*/) &&
      !ForceTailPredication) {
    LLVM_DEBUG(dbgs() << "ARM TP: Overflow possible, BTC can be max: ";
               BTC->dump());
    return false;
  }

  // 2) Prove that the sub expression is non-negative, i.e. it doesn't overflow:
  //
  //      ((ElementCount + (VectorWidth - 1)) / VectorWidth) - TripCount
  //
  // 2.1) First prove overflow can't happen in:
  //
  //      ElementCount + (VectorWidth - 1)
  //
  // Because of a lack of context, it is difficult to get a useful bounds on
  // this expression. But since ElementCount uses the same variables as the
  // TripCount (TC), for which we can find meaningful value ranges, we use that
  // instead and assert that:
  //
  //     upperbound(TC) <= UINT_MAX - VectorWidth
  //
  auto *TC = SE->getSCEV(TripCount);
  unsigned SizeInBits = TripCount->getType()->getScalarSizeInBits();
  auto Diff =  APInt(SizeInBits, ~0) - APInt(SizeInBits, VectorWidth);
  uint64_t MaxMinusVW = Diff.getZExtValue();
  uint64_t UpperboundTC = SE->getSignedRange(TC).getUpper().getZExtValue();

  if (UpperboundTC > MaxMinusVW && !ForceTailPredication) {
    LLVM_DEBUG(dbgs() << "ARM TP: Overflow possible in tripcount rounding:\n";
               dbgs() << "upperbound(TC) <= UINT_MAX - VectorWidth\n";
               dbgs() << UpperboundTC << " <= " << MaxMinusVW << "== false\n";);
    return false;
  }

  // 2.2) Make sure overflow doesn't happen in final expression:
  //  ((ElementCount + (VectorWidth - 1)) / VectorWidth) - TripCount,
  // To do this, compare the full ranges of these subexpressions:
  //
  //     Range(Ceil) <= Range(TC)
  //
  // where Ceil = ElementCount + (VW-1) / VW. If Ceil and TC are runtime
  // values (and not constants), we have to compensate for the lowerbound value
  // range to be off by 1. The reason is that BTC lives in the preheader in
  // this form:
  //
  //     %trip.count.minus = add nsw nuw i32 %N, -1
  //
  // For the loop to be executed, %N has to be >= 1 and as a result the value
  // range of %trip.count.minus has a lower bound of 0. Value %TC has this form:
  //
  //     %5 = add nuw nsw i32 %4, 1
  //     call void @llvm.set.loop.iterations.i32(i32 %5)
  //
  // where %5 is some expression using %N, which needs to have a lower bound of
  // 1. Thus, if the ranges of Ceil and TC are not a single constant but a set,
  // we first add 0 to TC such that we can do the <= comparison on both sets.
  //
  auto *One = SE->getOne(TripCount->getType());
  // ElementCount = BTC + 1
  auto *ElementCount = SE->getAddExpr(BTC, One);
  // Tmp = ElementCount + (VW-1)
  auto *ECPlusVWMinus1 = SE->getAddExpr(ElementCount,
      SE->getSCEV(ConstantInt::get(TripCount->getType(), VectorWidth - 1)));
  // Ceil = ElementCount + (VW-1) / VW
  auto *Ceil = SE->getUDivExpr(ECPlusVWMinus1,
      SE->getSCEV(ConstantInt::get(TripCount->getType(), VectorWidth)));

  ConstantRange RangeCeil = SE->getSignedRange(Ceil) ;
  ConstantRange RangeTC = SE->getSignedRange(TC) ;
  if (!RangeTC.isSingleElement()) {
    // Extend TC's range down to 0 (see comment above) before the containment
    // check.
    auto ZeroRange =
        ConstantRange(APInt(TripCount->getType()->getScalarSizeInBits(), 0));
    RangeTC = RangeTC.unionWith(ZeroRange);
  }
  if (!RangeTC.contains(RangeCeil) && !ForceTailPredication) {
    LLVM_DEBUG(dbgs() << "ARM TP: Overflow possible in sub\n");
    return false;
  }

  // 3) Find out if IV is an induction phi. Note that we can't use Loop
  // helpers here to get the induction variable, because the hardware loop is
  // no longer in loopsimplify form, and also the hwloop intrinsic uses a
  // different counter.  Using SCEV, we check that the induction is of the
  // form i = i + 4, where the increment must be equal to the VectorWidth.
  auto *IV = ActiveLaneMask->getOperand(0);
  auto *IVExpr = SE->getSCEV(IV);
  auto *AddExpr = dyn_cast<SCEVAddRecExpr>(IVExpr);
  if (!AddExpr) {
    LLVM_DEBUG(dbgs() << "ARM TP: induction not an add expr: "; IVExpr->dump());
    return false;
  }
  // Check that this AddRec is associated with this loop.
  if (AddExpr->getLoop() != L) {
    LLVM_DEBUG(dbgs() << "ARM TP: phi not part of this loop\n");
    return false;
  }
  auto *Step = dyn_cast<SCEVConstant>(AddExpr->getOperand(1));
  if (!Step) {
    LLVM_DEBUG(dbgs() << "ARM TP: induction step is not a constant: ";
               AddExpr->getOperand(1)->dump());
    return false;
  }
  auto StepValue = Step->getValue()->getSExtValue();
  if (VectorWidth == StepValue)
    return true;

  LLVM_DEBUG(dbgs() << "ARM TP: Step value " << StepValue << " doesn't match "
             "vector width " << VectorWidth << "\n");

  return false;
}
509 
510 // Materialize NumElements in the preheader block.
511 static Value *getNumElements(BasicBlock *Preheader, Value *BTC) {
512   // First, check the preheader if it not already exist:
513   //
514   // preheader:
515   //    %BTC = add i32 %N, -1
516   //    ..
517   // vector.body:
518   //
519   // if %BTC already exists. We don't need to emit %NumElems = %BTC + 1,
520   // but instead can just return %N.
521   for (auto &I : *Preheader) {
522     if (I.getOpcode() != Instruction::Add || &I != BTC)
523       continue;
524     ConstantInt *MinusOne = nullptr;
525     if (!(MinusOne = dyn_cast<ConstantInt>(I.getOperand(1))))
526       continue;
527     if (MinusOne->getSExtValue() == -1) {
528       LLVM_DEBUG(dbgs() << "ARM TP: Found num elems: " << I << "\n");
529       return I.getOperand(0);
530     }
531   }
532 
533   // But we do need to materialise BTC if it is not already there,
534   // e.g. if it is a constant.
535   IRBuilder<> Builder(Preheader->getTerminator());
536   Value *NumElements = Builder.CreateAdd(BTC,
537         ConstantInt::get(BTC->getType(), 1), "num.elements");
538   LLVM_DEBUG(dbgs() << "ARM TP: Created num elems: " << *NumElements << "\n");
539   return NumElements;
540 }
541 
// Replace the given @llvm.get.active.lane.mask with an MVE VCTP intrinsic of
// the matching lane count, driven by a new "elements remaining" phi that
// starts at NumElements and decreases by VectorWidth each iteration. Records
// the old->new predicate mapping in NewPredicates for Cleanup().
void MVETailPredication::InsertVCTPIntrinsic(IntrinsicInst *ActiveLaneMask,
    Value *TripCount, FixedVectorType *VecTy,
    DenseMap<Instruction*, Instruction*> &NewPredicates) {
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  Module *M = L->getHeader()->getModule();
  Type *Ty = IntegerType::get(M->getContext(), 32);
  unsigned VectorWidth = VecTy->getNumElements();

  // The backedge-taken count in @llvm.get.active.lane.mask, its 2nd operand,
  // is one less than the trip count. So we need to find or create
  // %num.elements = %BTC + 1 in the preheader.
  Value *BTC = ActiveLaneMask->getOperand(1);
  Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator());
  Value *NumElements = getNumElements(L->getLoopPreheader(), BTC);

  // Insert a phi to count the number of elements processed by the loop.
  Builder.SetInsertPoint(L->getHeader()->getFirstNonPHI()  );
  PHINode *Processed = Builder.CreatePHI(Ty, 2);
  Processed->addIncoming(NumElements, L->getLoopPreheader());

  // Replace @llvm.get.active.lane.mask() with the ARM specific VCTP intrinsic,
  // and thus represent the effect of tail predication.
  Builder.SetInsertPoint(ActiveLaneMask);
  ConstantInt *Factor =
    ConstantInt::get(cast<IntegerType>(Ty), VectorWidth);

  // Pick the VCTP variant matching the number of lanes.
  Intrinsic::ID VCTPID;
  switch (VectorWidth) {
  default:
    llvm_unreachable("unexpected number of lanes");
  case 4:  VCTPID = Intrinsic::arm_mve_vctp32; break;
  case 8:  VCTPID = Intrinsic::arm_mve_vctp16; break;
  case 16: VCTPID = Intrinsic::arm_mve_vctp8; break;

    // FIXME: vctp64 currently not supported because the predicate
    // vector wants to be <2 x i1>, but v2i1 is not a legal MVE
    // type, so problems happen at isel time.
    // Intrinsic::arm_mve_vctp64 exists for ACLE intrinsics
    // purposes, but takes a v4i1 instead of a v2i1.
  }
  Function *VCTP = Intrinsic::getDeclaration(M, VCTPID);
  Value *VCTPCall = Builder.CreateCall(VCTP, Processed);
  ActiveLaneMask->replaceAllUsesWith(VCTPCall);
  NewPredicates[ActiveLaneMask] = cast<Instruction>(VCTPCall);

  // Add the incoming value to the new phi.
  // TODO: This add likely already exists in the loop.
  Value *Remaining = Builder.CreateSub(Processed, Factor);
  Processed->addIncoming(Remaining, L->getLoopLatch());
  LLVM_DEBUG(dbgs() << "ARM TP: Insert processed elements phi: "
             << *Processed << "\n"
             << "ARM TP: Inserted VCTP: " << *VCTPCall << "\n");
}
595 
// Drive the conversion: for each masked load/store whose predicate is a
// @llvm.get.active.lane.mask, verify it is safe (IsSafeActiveMask) and
// replace it with a VCTP intrinsic. Returns false (leaving any already
// inserted VCTPs in place for earlier predicates) if the loop isn't a
// predicated vector loop or an unsafe mask is found.
bool MVETailPredication::TryConvert(Value *TripCount) {
  if (!IsPredicatedVectorLoop()) {
    LLVM_DEBUG(dbgs() << "ARM TP: no masked instructions in loop.\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "ARM TP: Found predicated vector loop.\n");

  SetVector<Instruction*> Predicates;
  DenseMap<Instruction*, Instruction*> NewPredicates;

  // Walk through the masked intrinsics and try to find whether the predicate
  // operand is generated by intrinsic @llvm.get.active.lane.mask().
  for (auto *I : MaskedInsts) {
    // The mask is operand 2 of a masked load and operand 3 of a masked store.
    unsigned PredOp = I->getIntrinsicID() == Intrinsic::masked_load ? 2 : 3;
    auto *Predicate = dyn_cast<Instruction>(I->getArgOperand(PredOp));
    if (!Predicate || Predicates.count(Predicate))
      continue;

    auto *ActiveLaneMask = dyn_cast<IntrinsicInst>(Predicate);
    if (!ActiveLaneMask ||
        ActiveLaneMask->getIntrinsicID() != Intrinsic::get_active_lane_mask)
      continue;

    Predicates.insert(Predicate);
    LLVM_DEBUG(dbgs() << "ARM TP: Found active lane mask: "
                      << *ActiveLaneMask << "\n");

    auto *VecTy = getVectorType(I);
    if (!IsSafeActiveMask(ActiveLaneMask, TripCount, VecTy)) {
      LLVM_DEBUG(dbgs() << "ARM TP: Not safe to insert VCTP.\n");
      return false;
    }
    LLVM_DEBUG(dbgs() << "ARM TP: Safe to insert VCTP.\n");
    InsertVCTPIntrinsic(ActiveLaneMask, TripCount, VecTy, NewPredicates);
  }

  // Now clean up: remove the replaced (dead) predicates and clone VCTPs into
  // the exit block where needed.
  ClonedVCTPInExitBlock = Cleanup(NewPredicates, Predicates, L);
  return true;
}
637 
// Factory used by the ARM target to add this pass to the pass pipeline.
Pass *llvm::createMVETailPredicationPass() {
  return new MVETailPredication();
}

// Pass identification token used by the legacy pass manager.
char MVETailPredication::ID = 0;

INITIALIZE_PASS_BEGIN(MVETailPredication, DEBUG_TYPE, DESC, false, false)
INITIALIZE_PASS_END(MVETailPredication, DEBUG_TYPE, DESC, false, false)
646