//===- MVETailPredication.cpp - MVE Tail Predication ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Armv8.1m introduced MVE, M-Profile Vector Extension, and low-overhead
/// branches to help accelerate DSP applications. These two extensions,
/// combined with a new form of predication called tail-predication, can be
/// used to provide implicit vector predication within a low-overhead loop.
/// This is implicit because the predicate of active/inactive lanes is
/// calculated by hardware, and thus does not need to be explicitly passed
/// to vector instructions. The instructions responsible for this are the
/// DLSTP and WLSTP instructions, which set up a tail-predicated loop and the
/// total number of data elements processed by the loop. The loop-end
/// LETP instruction is responsible for decrementing the number of remaining
/// elements to be processed and generating the mask of active lanes.
///
/// The HardwareLoops pass inserts intrinsics identifying loops that the
/// backend will attempt to convert into a low-overhead loop. The vectorizer is
/// responsible for generating a vectorized loop in which the lanes are
/// predicated upon the iteration counter. This pass looks at these predicated
/// vector loops, that are targets for low-overhead loops, and prepares them
/// for code generation. Once the vectorizer has produced a masked loop, there
/// are a couple of final forms:
/// - A tail-predicated loop, with implicit predication.
/// - A loop containing multiple VCTP instructions, predicating multiple VPT
///   blocks of instructions operating on different vector types.
///
/// This pass:
/// 1) Checks if the predicates of the masked load/store instructions are
///    generated by intrinsic @llvm.get.active.lane.mask(). This intrinsic
///    consumes the Backedge Taken Count (BTC) of the scalar loop as its second
///    argument, which we extract to set up the number of elements processed
///    by the loop.
/// 2) Intrinsic @llvm.get.active.lane.mask() is then replaced by the MVE
///    target-specific VCTP intrinsic to represent the effect of tail
///    predication. This will be picked up by the ARM Low-overhead loop pass,
///    which performs the final transformation to a DLSTP or WLSTP
///    tail-predicated loop.
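///
/// For illustration, a simplified sketch (value names invented) of the
/// rewrite this pass performs for a loop operating on 4 x i32 vectors:
///
///   %mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %iv, i32 %btc)
///
/// becomes:
///
///   %mask = call <4 x i1> @llvm.arm.mve.vctp32(i32 %elts.remaining)
///
/// where %elts.remaining counts down the number of elements still to be
/// processed; the masked loads/stores guarded by %mask are left untouched.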

#include "ARM.h"
#include "ARMSubtarget.h"
#include "ARMTargetTransformInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

using namespace llvm;

#define DEBUG_TYPE "mve-tail-predication"
#define DESC "Transform predicated vector loops to use MVE tail predication"

cl::opt<TailPredication::Mode> EnableTailPredication(
    "tail-predication", cl::desc("MVE tail-predication options"),
    cl::init(TailPredication::Disabled),
    cl::values(clEnumValN(TailPredication::Disabled, "disabled",
                          "Don't tail-predicate loops"),
               clEnumValN(TailPredication::EnabledNoReductions,
                          "enabled-no-reductions",
                          "Enable tail-predication, but not for reduction loops"),
               clEnumValN(TailPredication::Enabled,
                          "enabled",
                          "Enable tail-predication, including reduction loops"),
               clEnumValN(TailPredication::ForceEnabledNoReductions,
                          "force-enabled-no-reductions",
                          "Enable tail-predication, but not for reduction loops, "
                          "and force this which might be unsafe"),
               clEnumValN(TailPredication::ForceEnabled,
                          "force-enabled",
                          "Enable tail-predication, including reduction loops, "
                          "and force this which might be unsafe")));

namespace {

class MVETailPredication : public LoopPass {
  SmallVector<IntrinsicInst*, 4> MaskedInsts;
  Loop *L = nullptr;
  ScalarEvolution *SE = nullptr;
  TargetTransformInfo *TTI = nullptr;
  const ARMSubtarget *ST = nullptr;

public:
  static char ID;

  MVETailPredication() : LoopPass(ID) { }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.setPreservesCFG();
  }

  bool runOnLoop(Loop *L, LPPassManager&) override;

private:
  /// Perform the relevant checks on the loop and convert if possible.
  bool TryConvert(Value *TripCount);

  /// Return whether this is a vectorized loop that contains masked
  /// load/stores.
  bool IsPredicatedVectorLoop();

  /// Perform checks on the arguments of the @llvm.get.active.lane.mask
  /// intrinsic: check that the first is a loop induction variable, and for
  /// the second check that no overflow can occur in the expressions that use
  /// this backedge-taken count.
  bool IsSafeActiveMask(IntrinsicInst *ActiveLaneMask, Value *TripCount,
                        FixedVectorType *VecTy);

  /// Insert the intrinsic to represent the effect of tail predication.
  void InsertVCTPIntrinsic(IntrinsicInst *ActiveLaneMask, Value *TripCount,
                           FixedVectorType *VecTy);

  /// Rematerialize the iteration count in exit blocks, which enables
  /// ARMLowOverheadLoops to better optimise away loop update statements inside
  /// hardware-loops.
  void RematerializeIterCount();
};

} // end namespace
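
// For reference, a hardware-loop candidate as produced by the HardwareLoops
// pass has roughly this shape (a simplified sketch; value names are invented
// and the exact intrinsic overloads may differ):
//
//   preheader:
//     call void @llvm.set.loop.iterations.i32(i32 %trip.count)
//     br label %vector.body
//   ...
//   vector.body:
//     %rem = call i32 @llvm.loop.decrement.reg.i32(i32 %count, i32 1)
//
// The helpers below and runOnLoop look for these intrinsics.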

static bool IsDecrement(Instruction &I) {
  auto *Call = dyn_cast<IntrinsicInst>(&I);
  if (!Call)
    return false;

  Intrinsic::ID ID = Call->getIntrinsicID();
  return ID == Intrinsic::loop_decrement_reg;
}

static bool IsMasked(Instruction *I) {
  auto *Call = dyn_cast<IntrinsicInst>(I);
  if (!Call)
    return false;

  Intrinsic::ID ID = Call->getIntrinsicID();
  // TODO: Support gather/scatter expand/compress operations.
  return ID == Intrinsic::masked_store || ID == Intrinsic::masked_load;
}

bool MVETailPredication::runOnLoop(Loop *L, LPPassManager&) {
  if (skipLoop(L) || !EnableTailPredication)
    return false;

  MaskedInsts.clear();
  Function &F = *L->getHeader()->getParent();
  auto &TPC = getAnalysis<TargetPassConfig>();
  auto &TM = TPC.getTM<TargetMachine>();
  ST = &TM.getSubtarget<ARMSubtarget>(F);
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  this->L = L;

  // The MVE and LOB extensions are combined to enable tail-predication, but
  // there's nothing preventing us from generating VCTP instructions for v8.1m.
  if (!ST->hasMVEIntegerOps() || !ST->hasV8_1MMainlineOps()) {
    LLVM_DEBUG(dbgs() << "ARM TP: Not a v8.1m.main+mve target.\n");
    return false;
  }

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader)
    return false;

  auto FindLoopIterations = [](BasicBlock *BB) -> IntrinsicInst* {
    for (auto &I : *BB) {
      auto *Call = dyn_cast<IntrinsicInst>(&I);
      if (!Call)
        continue;

      Intrinsic::ID ID = Call->getIntrinsicID();
      if (ID == Intrinsic::set_loop_iterations ||
          ID == Intrinsic::test_set_loop_iterations)
        return cast<IntrinsicInst>(&I);
    }
    return nullptr;
  };

  // Look for the hardware loop intrinsic that sets the iteration count.
  IntrinsicInst *Setup = FindLoopIterations(Preheader);

  // The test.set iteration count intrinsic could live in the pre-preheader.
  if (!Setup) {
    if (!Preheader->getSinglePredecessor())
      return false;
    Setup = FindLoopIterations(Preheader->getSinglePredecessor());
    if (!Setup)
      return false;
  }

  // Search for the hardware loop intrinsic that decrements the loop counter.
  IntrinsicInst *Decrement = nullptr;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      if (IsDecrement(I)) {
        Decrement = cast<IntrinsicInst>(&I);
        break;
      }
    }
  }

  if (!Decrement)
    return false;

  LLVM_DEBUG(dbgs() << "ARM TP: Running on Loop: " << *L << *Setup << "\n"
                    << *Decrement << "\n");

  if (!TryConvert(Setup->getArgOperand(0))) {
    LLVM_DEBUG(dbgs() << "ARM TP: Can't tail-predicate this loop.\n");
    return false;
  }

  return true;
}

static FixedVectorType *getVectorType(IntrinsicInst *I) {
  unsigned TypeOp = I->getIntrinsicID() == Intrinsic::masked_load ? 0 : 1;
  auto *PtrTy = cast<PointerType>(I->getOperand(TypeOp)->getType());
  auto *VecTy = cast<FixedVectorType>(PtrTy->getElementType());
  assert(VecTy && "No scalable vectors expected here");
  return VecTy;
}
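
// For illustration (a sketch, names invented), the kind of predicated
// operation the loop is expected to contain is a masked load/store with the
// mask as its third (load) or fourth (store) operand:
//
//   %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptr,
//            i32 4, <4 x i1> %mask, <4 x i32> undef)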

// Check that the loop contains at least one masked load/store intrinsic.
// Other than these masked loads/stores, we only support 'normal' (i.e.
// unpredicated) vector instructions.
bool MVETailPredication::IsPredicatedVectorLoop() {
  bool ActiveLaneMask = false;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      auto *Int = dyn_cast<IntrinsicInst>(&I);
      if (!Int)
        continue;

      switch (Int->getIntrinsicID()) {
      case Intrinsic::get_active_lane_mask:
        ActiveLaneMask = true;
        continue;
      case Intrinsic::sadd_sat:
      case Intrinsic::uadd_sat:
      case Intrinsic::ssub_sat:
      case Intrinsic::usub_sat:
      case Intrinsic::experimental_vector_reduce_add:
        continue;
      case Intrinsic::fma:
      case Intrinsic::trunc:
      case Intrinsic::rint:
      case Intrinsic::round:
      case Intrinsic::floor:
      case Intrinsic::ceil:
      case Intrinsic::fabs:
        if (ST->hasMVEFloatOps())
          continue;
        break;
      default:
        break;
      }

      if (IsMasked(&I)) {
        auto *VecTy = getVectorType(Int);
        unsigned Lanes = VecTy->getNumElements();
        unsigned ElementWidth = VecTy->getScalarSizeInBits();
        // MVE vectors are 128-bit, but don't support 128 x i1.
        // TODO: Can we support vectors larger than 128-bits?
        unsigned MaxWidth = TTI->getRegisterBitWidth(true);
        if (Lanes * ElementWidth > MaxWidth || Lanes == MaxWidth)
          return false;
        MaskedInsts.push_back(cast<IntrinsicInst>(&I));
        continue;
      }

      for (const Use &U : Int->args()) {
        if (isa<VectorType>(U->getType()))
          return false;
      }
    }
  }

  if (!ActiveLaneMask) {
    LLVM_DEBUG(dbgs() << "ARM TP: No get.active.lane.mask intrinsic found.\n");
    return false;
  }
  return !MaskedInsts.empty();
}

// Look through the exit block to see whether there's a duplicate predicate
// instruction. This can happen when we need to perform a select on values
// from the last and previous iteration. Instead of doing a straight
// replacement of that predicate with the vctp, clone the vctp and place it
// in the block. This means that the VPR doesn't have to be live into the
// exit block which should make it easier to convert this loop into a proper
// tail predicated loop.
static void Cleanup(SetVector<Instruction*> &MaybeDead, Loop *L) {
  BasicBlock *Exit = L->getUniqueExitBlock();
  if (!Exit) {
    LLVM_DEBUG(dbgs() << "ARM TP: can't find loop exit block\n");
    return;
  }

  // Drop references and add operands to check for dead.
  SmallPtrSet<Instruction*, 4> Dead;
  while (!MaybeDead.empty()) {
    auto *I = MaybeDead.front();
    MaybeDead.remove(I);
    if (I->hasNUsesOrMore(1))
      continue;

    for (auto &U : I->operands())
      if (auto *OpI = dyn_cast<Instruction>(U))
        MaybeDead.insert(OpI);

    Dead.insert(I);
  }

  for (auto *I : Dead) {
    LLVM_DEBUG(dbgs() << "ARM TP: removing dead insn: "; I->dump());
    I->eraseFromParent();
  }

  for (auto *I : L->blocks())
    DeleteDeadPHIs(I);
}

// The active lane intrinsic has this form:
//
//   @llvm.get.active.lane.mask(IV, BTC)
//
// Here we perform checks that this intrinsic behaves as expected, which
// means:
//
// 1) The element count, which is calculated with BTC + 1, cannot overflow.
// 2) The element count needs to be sufficiently large that the decrement of
//    the element counter doesn't overflow, which means that we need to prove:
//        ceil(ElementCount / VectorWidth) >= TripCount
//    by rounding ElementCount up:
//        (ElementCount + (VectorWidth - 1)) / VectorWidth
//    and evaluating whether this expression isKnownNonNegative:
//        ((ElementCount + (VectorWidth - 1)) / VectorWidth) - TripCount
// 3) The IV must be an induction phi with an increment equal to the
//    vector width.
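//
// As a worked example for condition 2), with invented numbers: take
// VectorWidth = 4 and BTC = 9, so ElementCount = BTC + 1 = 10. Then
//
//   Ceil = (10 + (4 - 1)) / 4 = 3
//
// and with TripCount = 3 the expression Ceil - TripCount = 0 is
// non-negative, i.e. the element counter cannot wrap below zero.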
bool MVETailPredication::IsSafeActiveMask(IntrinsicInst *ActiveLaneMask,
    Value *TripCount, FixedVectorType *VecTy) {
  bool ForceTailPredication =
    EnableTailPredication == TailPredication::ForceEnabledNoReductions ||
    EnableTailPredication == TailPredication::ForceEnabled;

  // 1) Test whether entry to the loop is protected by a conditional
  // BTC + 1 < 0. In other words, if the scalar trip count overflows and
  // becomes negative, we shouldn't enter the loop and creating the
  // tripcount expression BTC + 1 is not safe. So, check that BTC
  // isn't max. This is evaluated in unsigned, because the semantics
  // of @get.active.lane.mask is a ULE comparison.
  auto *BackedgeTakenCount = ActiveLaneMask->getOperand(1);
  auto *BTC = SE->getSCEV(BackedgeTakenCount);
  auto *MaxBTC = SE->getConstantMaxBackedgeTakenCount(L);

  if (isa<SCEVCouldNotCompute>(MaxBTC)) {
    LLVM_DEBUG(dbgs() << "ARM TP: Can't compute SCEV BTC expression: ";
               BTC->dump());
    return false;
  }

  APInt MaxInt = APInt(BTC->getType()->getScalarSizeInBits(), ~0);
  if (cast<SCEVConstant>(MaxBTC)->getAPInt().eq(MaxInt) &&
      !ForceTailPredication) {
    LLVM_DEBUG(dbgs() << "ARM TP: Overflow possible, BTC can be int max: ";
               BTC->dump());
    return false;
  }

  // 2) Prove that the sub expression is non-negative, i.e. it doesn't
  // overflow:
  //
  //   ((ElementCount + (VectorWidth - 1)) / VectorWidth) - TripCount
  //
  // 2.1) First prove overflow can't happen in:
  //
  //   ElementCount + (VectorWidth - 1)
  //
  // Because of a lack of context, it is difficult to get a useful bound on
  // this expression. But since ElementCount uses the same variables as the
  // TripCount (TC), for which we can find meaningful value ranges, we use
  // that instead and assert that:
  //
  //   upperbound(TC) <= UINT_MAX - VectorWidth
  //
  auto *TC = SE->getSCEV(TripCount);
  unsigned SizeInBits = TripCount->getType()->getScalarSizeInBits();
  int VectorWidth = VecTy->getNumElements();
  auto Diff = APInt(SizeInBits, ~0) - APInt(SizeInBits, VectorWidth);
  uint64_t MaxMinusVW = Diff.getZExtValue();
  uint64_t UpperboundTC = SE->getSignedRange(TC).getUpper().getZExtValue();

  if (UpperboundTC > MaxMinusVW && !ForceTailPredication) {
    LLVM_DEBUG(dbgs() << "ARM TP: Overflow possible in tripcount rounding:\n";
               dbgs() << "upperbound(TC) <= UINT_MAX - VectorWidth\n";
               dbgs() << UpperboundTC << " <= " << MaxMinusVW << " == false\n";);
    return false;
  }

  // 2.2) Make sure overflow doesn't happen in the final expression:
  //
  //   ((ElementCount + (VectorWidth - 1)) / VectorWidth) - TripCount
  //
  // To do this, compare the full ranges of these subexpressions:
  //
  //   Range(Ceil) <= Range(TC)
  //
  // where Ceil = (ElementCount + (VW-1)) / VW. If Ceil and TC are runtime
  // values (and not constants), we have to compensate for the fact that the
  // lower bound of the TC value range is off by 1. The reason is that BTC
  // lives in the preheader in this form:
  //
  //   %trip.count.minus = add nsw nuw i32 %N, -1
  //
  // For the loop to be executed, %N has to be >= 1 and as a result the value
  // range of %trip.count.minus has a lower bound of 0. Value %TC has this
  // form:
  //
  //   %5 = add nuw nsw i32 %4, 1
  //   call void @llvm.set.loop.iterations.i32(i32 %5)
  //
  // where %5 is some expression using %N, which needs to have a lower bound
  // of 1. Thus, if the ranges of Ceil and TC are not a single constant but a
  // set, we first add 0 to the range of TC such that we can do the <=
  // comparison on both sets.
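  //
  // As a hypothetical example (invented ranges): if Range(TC) = [1, 4) and
  // Range(Ceil) = [0, 4), extending Range(TC) with 0 gives [0, 4), which
  // contains Range(Ceil), so the subtraction cannot wrap.
  //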
  auto *One = SE->getOne(TripCount->getType());
  // ElementCount = BTC + 1
  auto *ElementCount = SE->getAddExpr(BTC, One);
  // Tmp = ElementCount + (VW-1)
  auto *ECPlusVWMinus1 = SE->getAddExpr(ElementCount,
      SE->getSCEV(ConstantInt::get(TripCount->getType(), VectorWidth - 1)));
  // Ceil = (ElementCount + (VW-1)) / VW
  auto *Ceil = SE->getUDivExpr(ECPlusVWMinus1,
      SE->getSCEV(ConstantInt::get(TripCount->getType(), VectorWidth)));

  ConstantRange RangeCeil = SE->getSignedRange(Ceil);
  ConstantRange RangeTC = SE->getSignedRange(TC);
  if (!RangeTC.isSingleElement()) {
    auto ZeroRange =
      ConstantRange(APInt(TripCount->getType()->getScalarSizeInBits(), 0));
    RangeTC = RangeTC.unionWith(ZeroRange);
  }
  if (!RangeTC.contains(RangeCeil) && !ForceTailPredication) {
    LLVM_DEBUG(dbgs() << "ARM TP: Overflow possible in sub\n");
    return false;
  }

  // 3) Find out if IV is an induction phi. Note that we can't use Loop
  // helpers here to get the induction variable, because the hardware loop is
  // no longer in loop-simplify form, and also the hwloop intrinsic uses a
  // different counter. Using SCEV, we check that the induction is of the
  // form i = i + 4, where the increment must be equal to the VectorWidth.
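  // For illustration (invented names), the expected shape is:
  //
  //   %iv = phi i32 [ 0, %preheader ], [ %iv.next, %vector.body ]
  //   %iv.next = add i32 %iv, 4    ; step == VectorWidth
  //
  // i.e. the SCEV addrec {0,+,4}.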
  auto *IV = ActiveLaneMask->getOperand(0);
  auto *IVExpr = SE->getSCEV(IV);
  auto *AddExpr = dyn_cast<SCEVAddRecExpr>(IVExpr);
  if (!AddExpr) {
    LLVM_DEBUG(dbgs() << "ARM TP: induction not an add expr: "; IVExpr->dump());
    return false;
  }
  // Check that this AddRec is associated with this loop.
  if (AddExpr->getLoop() != L) {
    LLVM_DEBUG(dbgs() << "ARM TP: phi not part of this loop\n");
    return false;
  }
  auto *Step = dyn_cast<SCEVConstant>(AddExpr->getOperand(1));
  if (!Step) {
    LLVM_DEBUG(dbgs() << "ARM TP: induction step is not a constant: ";
               AddExpr->getOperand(1)->dump());
    return false;
  }
  auto StepValue = Step->getValue()->getSExtValue();
  if (VectorWidth == StepValue)
    return true;

  LLVM_DEBUG(dbgs() << "ARM TP: Step value " << StepValue << " doesn't match "
             "vector width " << VectorWidth << "\n");

  return false;
}

// Materialize NumElements in the preheader block.
static Value *getNumElements(BasicBlock *Preheader, Value *BTC) {
  // First, check whether %BTC already exists in the preheader in this form:
  //
  // preheader:
  //    %BTC = add i32 %N, -1
  //    ..
  // vector.body:
  //
  // If it does, we don't need to emit %NumElems = %BTC + 1, but can simply
  // return %N.
  for (auto &I : *Preheader) {
    if (I.getOpcode() != Instruction::Add || &I != BTC)
      continue;
    ConstantInt *MinusOne = nullptr;
    if (!(MinusOne = dyn_cast<ConstantInt>(I.getOperand(1))))
      continue;
    if (MinusOne->getSExtValue() == -1) {
      LLVM_DEBUG(dbgs() << "ARM TP: Found num elems: " << I << "\n");
      return I.getOperand(0);
    }
  }

  // Otherwise we need to materialise NumElements = BTC + 1, e.g. when BTC
  // is a constant.
  IRBuilder<> Builder(Preheader->getTerminator());
  Value *NumElements = Builder.CreateAdd(BTC,
      ConstantInt::get(BTC->getType(), 1), "num.elements");
  LLVM_DEBUG(dbgs() << "ARM TP: Created num elems: " << *NumElements << "\n");
  return NumElements;
}

void MVETailPredication::InsertVCTPIntrinsic(IntrinsicInst *ActiveLaneMask,
    Value *TripCount, FixedVectorType *VecTy) {
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  Module *M = L->getHeader()->getModule();
  Type *Ty = IntegerType::get(M->getContext(), 32);
  unsigned VectorWidth = VecTy->getNumElements();

  // The backedge-taken count in @llvm.get.active.lane.mask, its 2nd operand,
  // is one less than the trip count. So we need to find or create
  // %num.elements = %BTC + 1 in the preheader.
  Value *BTC = ActiveLaneMask->getOperand(1);
  Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator());
  Value *NumElements = getNumElements(L->getLoopPreheader(), BTC);

  // Insert a phi to count the number of elements processed by the loop.
  Builder.SetInsertPoint(L->getHeader()->getFirstNonPHI());
  PHINode *Processed = Builder.CreatePHI(Ty, 2);
  Processed->addIncoming(NumElements, L->getLoopPreheader());

  // Replace @llvm.get.active.lane.mask() with the ARM-specific VCTP
  // intrinsic, and thus represent the effect of tail predication.
  Builder.SetInsertPoint(ActiveLaneMask);
  ConstantInt *Factor =
    ConstantInt::get(cast<IntegerType>(Ty), VectorWidth);

  Intrinsic::ID VCTPID;
  switch (VectorWidth) {
  default:
    llvm_unreachable("unexpected number of lanes");
  case 4:  VCTPID = Intrinsic::arm_mve_vctp32; break;
  case 8:  VCTPID = Intrinsic::arm_mve_vctp16; break;
  case 16: VCTPID = Intrinsic::arm_mve_vctp8; break;

    // FIXME: vctp64 currently not supported because the predicate
    // vector wants to be <2 x i1>, but v2i1 is not a legal MVE
    // type, so problems happen at isel time.
    // Intrinsic::arm_mve_vctp64 exists for ACLE intrinsics
    // purposes, but takes a v4i1 instead of a v2i1.
  }
  Function *VCTP = Intrinsic::getDeclaration(M, VCTPID);
  Value *VCTPCall = Builder.CreateCall(VCTP, Processed);
  ActiveLaneMask->replaceAllUsesWith(VCTPCall);

  // Add the incoming value to the new phi.
  // TODO: This add likely already exists in the loop.
  Value *Remaining = Builder.CreateSub(Processed, Factor);
  Processed->addIncoming(Remaining, L->getLoopLatch());
  LLVM_DEBUG(dbgs() << "ARM TP: Insert processed elements phi: "
             << *Processed << "\n"
             << "ARM TP: Inserted VCTP: " << *VCTPCall << "\n");
}
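
// After InsertVCTPIntrinsic the vector body carries an element counter,
// roughly of this shape (a sketch; value names invented):
//
//   vector.body:
//     %elts = phi i32 [ %num.elements, %preheader ], [ %elts.rem, %vector.body ]
//     %mask = call <4 x i1> @llvm.arm.mve.vctp32(i32 %elts)
//     ...
//     %elts.rem = sub i32 %elts, 4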

bool MVETailPredication::TryConvert(Value *TripCount) {
  if (!IsPredicatedVectorLoop()) {
    LLVM_DEBUG(dbgs() << "ARM TP: no masked instructions in loop.\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "ARM TP: Found predicated vector loop.\n");
  SetVector<Instruction*> Predicates;

  // Walk through the masked intrinsics and try to find whether the predicate
  // operand is generated by intrinsic @llvm.get.active.lane.mask().
  for (auto *I : MaskedInsts) {
    unsigned PredOp = I->getIntrinsicID() == Intrinsic::masked_load ? 2 : 3;
    auto *Predicate = dyn_cast<Instruction>(I->getArgOperand(PredOp));
    if (!Predicate || Predicates.count(Predicate))
      continue;

    auto *ActiveLaneMask = dyn_cast<IntrinsicInst>(Predicate);
    if (!ActiveLaneMask ||
        ActiveLaneMask->getIntrinsicID() != Intrinsic::get_active_lane_mask)
      continue;

    Predicates.insert(Predicate);
    LLVM_DEBUG(dbgs() << "ARM TP: Found active lane mask: "
               << *ActiveLaneMask << "\n");

    auto *VecTy = getVectorType(I);
    if (!IsSafeActiveMask(ActiveLaneMask, TripCount, VecTy)) {
      LLVM_DEBUG(dbgs() << "ARM TP: Not safe to insert VCTP.\n");
      return false;
    }
    LLVM_DEBUG(dbgs() << "ARM TP: Safe to insert VCTP.\n");
    InsertVCTPIntrinsic(ActiveLaneMask, TripCount, VecTy);
  }

  Cleanup(Predicates, L);
  return true;
}

Pass *llvm::createMVETailPredicationPass() {
  return new MVETailPredication();
}

char MVETailPredication::ID = 0;

INITIALIZE_PASS_BEGIN(MVETailPredication, DEBUG_TYPE, DESC, false, false)
INITIALIZE_PASS_END(MVETailPredication, DEBUG_TYPE, DESC, false, false)