1 //===- VPlan.cpp - Vectorizer Plan ----------------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// 9 /// \file 10 /// This is the LLVM vectorization plan. It represents a candidate for 11 /// vectorization, allowing to plan and optimize how to vectorize a given loop 12 /// before generating LLVM-IR. 13 /// The vectorizer uses vectorization plans to estimate the costs of potential 14 /// candidates and if profitable to execute the desired plan, generating vector 15 /// LLVM-IR code. 16 /// 17 //===----------------------------------------------------------------------===// 18 19 #include "VPlan.h" 20 #include "VPlanDominatorTree.h" 21 #include "llvm/ADT/DepthFirstIterator.h" 22 #include "llvm/ADT/PostOrderIterator.h" 23 #include "llvm/ADT/STLExtras.h" 24 #include "llvm/ADT/SmallVector.h" 25 #include "llvm/ADT/Twine.h" 26 #include "llvm/Analysis/IVDescriptors.h" 27 #include "llvm/Analysis/LoopInfo.h" 28 #include "llvm/IR/BasicBlock.h" 29 #include "llvm/IR/CFG.h" 30 #include "llvm/IR/IRBuilder.h" 31 #include "llvm/IR/Instruction.h" 32 #include "llvm/IR/Instructions.h" 33 #include "llvm/IR/Type.h" 34 #include "llvm/IR/Value.h" 35 #include "llvm/Support/Casting.h" 36 #include "llvm/Support/CommandLine.h" 37 #include "llvm/Support/Debug.h" 38 #include "llvm/Support/ErrorHandling.h" 39 #include "llvm/Support/GenericDomTreeConstruction.h" 40 #include "llvm/Support/GraphWriter.h" 41 #include "llvm/Support/raw_ostream.h" 42 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 43 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h" 44 #include <cassert> 45 #include <string> 46 #include <vector> 47 48 using namespace llvm; 49 extern cl::opt<bool> EnableVPlanNativePath; 50 51 #define 
DEBUG_TYPE "vplan"

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Stream operator for VPValues. Seeds the slot tracker with the enclosing
// VPlan when the value is a VPInstruction attached to a block, so operands
// print with plan-wide numbering; otherwise prints without a plan.
raw_ostream &llvm::operator<<(raw_ostream &OS, const VPValue &V) {
  const VPInstruction *Instr = dyn_cast<VPInstruction>(&V);
  VPSlotTracker SlotTracker(
      (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
  V.print(OS, SlotTracker);
  return OS;
}
#endif

// Materialize this abstract lane as a concrete i32 IR value, emitting
// runtime arithmetic for lanes counted from the end of a scalable vector.
Value *VPLane::getAsRuntimeExpr(IRBuilderBase &Builder,
                                const ElementCount &VF) const {
  switch (LaneKind) {
  case VPLane::Kind::ScalableLast:
    // Lane = RuntimeVF - VF.getKnownMinValue() + Lane
    return Builder.CreateSub(getRuntimeVF(Builder, Builder.getInt32Ty(), VF),
                             Builder.getInt32(VF.getKnownMinValue() - Lane));
  case VPLane::Kind::First:
    return Builder.getInt32(Lane);
  }
  llvm_unreachable("Unknown lane kind");
}

// Register this value with its defining VPDef (if any) so def/use
// bookkeeping stays consistent.
VPValue::VPValue(const unsigned char SC, Value *UV, VPDef *Def)
    : SubclassID(SC), UnderlyingVal(UV), Def(Def) {
  if (Def)
    Def->addDefinedValue(this);
}

VPValue::~VPValue() {
  assert(Users.empty() && "trying to delete a VPValue with remaining users");
  if (Def)
    Def->removeDefinedValue(this);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Print the defining recipe when there is one, otherwise just the operand
// name.
void VPValue::print(raw_ostream &OS, VPSlotTracker &SlotTracker) const {
  if (const VPRecipeBase *R = dyn_cast_or_null<VPRecipeBase>(Def))
    R->print(OS, "", SlotTracker);
  else
    printAsOperand(OS, SlotTracker);
}

void VPValue::dump() const {
  const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this->Def);
  VPSlotTracker SlotTracker(
      (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
  print(dbgs(), SlotTracker);
  dbgs() << "\n";
}

void VPDef::dump() const {
  const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this);
  VPSlotTracker SlotTracker(
      (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
  print(dbgs(), "", SlotTracker);
  dbgs() << "\n";
}
#endif

// Get the top-most entry block of \p Start. This is the entry block of the
// containing VPlan. This function is templated to support both const and
// non-const blocks. It first climbs to the outermost region, then walks
// predecessors with a worklist until it finds the block with none - the
// plan's entry.
template <typename T> static T *getPlanEntry(T *Start) {
  T *Next = Start;
  T *Current = Start;
  while ((Next = Next->getParent()))
    Current = Next;

  SmallSetVector<T *, 8> WorkList;
  WorkList.insert(Current);

  // Worklist-based backward search; SmallSetVector guarantees each block is
  // visited at most once.
  for (unsigned i = 0; i < WorkList.size(); i++) {
    T *Current = WorkList[i];
    if (Current->getNumPredecessors() == 0)
      return Current;
    auto &Predecessors = Current->getPredecessors();
    WorkList.insert(Predecessors.begin(), Predecessors.end());
  }

  llvm_unreachable("VPlan without any entry node without predecessors");
}

// Only the plan's entry block stores the VPlan pointer; route through it.
VPlan *VPBlockBase::getPlan() { return getPlanEntry(this)->Plan; }

const VPlan *VPBlockBase::getPlan() const { return getPlanEntry(this)->Plan; }

/// \return the VPBasicBlock that is the entry of Block, possibly indirectly.
const VPBasicBlock *VPBlockBase::getEntryBasicBlock() const {
  const VPBlockBase *Block = this;
  while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getEntry();
  return cast<VPBasicBlock>(Block);
}

VPBasicBlock *VPBlockBase::getEntryBasicBlock() {
  VPBlockBase *Block = this;
  while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getEntry();
  return cast<VPBasicBlock>(Block);
}

void VPBlockBase::setPlan(VPlan *ParentPlan) {
  assert(ParentPlan->getEntry() == this &&
         "Can only set plan on its entry block.");
  Plan = ParentPlan;
}

/// \return the VPBasicBlock that is the exit of Block, possibly indirectly.
const VPBasicBlock *VPBlockBase::getExitBasicBlock() const {
  const VPBlockBase *Block = this;
  while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getExit();
  return cast<VPBasicBlock>(Block);
}

VPBasicBlock *VPBlockBase::getExitBasicBlock() {
  VPBlockBase *Block = this;
  while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
    Block = Region->getExit();
  return cast<VPBasicBlock>(Block);
}

// Climb out of enclosing regions until reaching a block that actually has
// successors (a region exit inherits its region's successors).
VPBlockBase *VPBlockBase::getEnclosingBlockWithSuccessors() {
  if (!Successors.empty() || !Parent)
    return this;
  assert(Parent->getExit() == this &&
         "Block w/o successors not the exit of its parent.");
  return Parent->getEnclosingBlockWithSuccessors();
}

// Counterpart of getEnclosingBlockWithSuccessors for predecessors.
VPBlockBase *VPBlockBase::getEnclosingBlockWithPredecessors() {
  if (!Predecessors.empty() || !Parent)
    return this;
  assert(Parent->getEntry() == this &&
         "Block w/o predecessors not the entry of its parent.");
  return Parent->getEnclosingBlockWithPredecessors();
}

// The condition bit is modeled as the single operand of CondBitUser; null
// when the block has no conditional terminator.
VPValue *VPBlockBase::getCondBit() {
  return CondBitUser.getSingleOperandOrNull();
}

const VPValue *VPBlockBase::getCondBit() const {
  return CondBitUser.getSingleOperandOrNull();
}

void VPBlockBase::setCondBit(VPValue *CV) { CondBitUser.resetSingleOpUser(CV); }

// Block predicate, also modeled as a single-operand VPUser.
VPValue *VPBlockBase::getPredicate() {
  return PredicateUser.getSingleOperandOrNull();
}

const VPValue *VPBlockBase::getPredicate() const {
  return PredicateUser.getSingleOperandOrNull();
}

void VPBlockBase::setPredicate(VPValue *CV) {
  PredicateUser.resetSingleOpUser(CV);
}

// Delete every block reachable from \p Entry. Blocks are collected up front
// so deletion does not invalidate the traversal.
void VPBlockBase::deleteCFG(VPBlockBase *Entry) {
  SmallVector<VPBlockBase *, 8> Blocks(depth_first(Entry));

  for (VPBlockBase *Block : Blocks)
    delete Block;
}

// Return an iterator to the first non-phi recipe, or end() if all recipes
// are phi-like.
VPBasicBlock::iterator VPBasicBlock::getFirstNonPhi() {
  iterator It = begin();
  while (It != end() && It->isPhi())
    It++;
  return It;
}

// Look up the generated IR Value for \p Def at a specific (part, lane)
// instance. Lookup order: live-in IR value for plan-external defs, then the
// per-lane scalar cache, then extraction from the cached per-part vector.
Value *VPTransformState::get(VPValue *Def, const VPIteration &Instance) {
  if (!Def->getDef())
    return Def->getLiveInIRValue();

  if (hasScalarValue(Def, Instance)) {
    return Data
        .PerPartScalars[Def][Instance.Part][Instance.Lane.mapToCacheIndex(VF)];
  }

  assert(hasVectorValue(Def, Instance.Part));
  auto *VecPart = Data.PerPartOutput[Def][Instance.Part];
  if (!VecPart->getType()->isVectorTy()) {
    // A scalar "vector value" only has lane 0.
    assert(Instance.Lane.isFirstLane() && "cannot get lane > 0 for scalar");
    return VecPart;
  }
  // TODO: Cache created scalar values.
  Value *Lane = Instance.Lane.getAsRuntimeExpr(Builder, VF);
  auto *Extract = Builder.CreateExtractElement(VecPart, Lane);
  // set(Def, Extract, Instance);
  return Extract;
}

BasicBlock *
VPBasicBlock::createEmptyBasicBlock(VPTransformState::CFGState &CFG) {
  // BB stands for IR BasicBlocks. VPBB stands for VPlan VPBasicBlocks.
  // Pred stands for Predecessor. Prev stands for Previous - last
  // visited/created.
  BasicBlock *PrevBB = CFG.PrevBB;
  BasicBlock *NewBB = BasicBlock::Create(PrevBB->getContext(), getName(),
                                         PrevBB->getParent(), CFG.ExitBB);
  LLVM_DEBUG(dbgs() << "LV: created " << NewBB->getName() << '\n');

  // Hook up the new basic block to its predecessors.
  for (VPBlockBase *PredVPBlock : getHierarchicalPredecessors()) {
    VPBasicBlock *PredVPBB = PredVPBlock->getExitBasicBlock();
    auto &PredVPSuccessors = PredVPBB->getSuccessors();
    BasicBlock *PredBB = CFG.VPBB2IRBB[PredVPBB];

    // In outer loop vectorization scenario, the predecessor BBlock may not yet
    // be visited(backedge). Mark the VPBasicBlock for fixup at the end of
    // vectorization. We do not encounter this case in inner loop vectorization
    // as we start out by building a loop skeleton with the vector loop header
    // and latch blocks.
As a result, we never enter this function for the 269 // header block in the non VPlan-native path. 270 if (!PredBB) { 271 assert(EnableVPlanNativePath && 272 "Unexpected null predecessor in non VPlan-native path"); 273 CFG.VPBBsToFix.push_back(PredVPBB); 274 continue; 275 } 276 277 assert(PredBB && "Predecessor basic-block not found building successor."); 278 auto *PredBBTerminator = PredBB->getTerminator(); 279 LLVM_DEBUG(dbgs() << "LV: draw edge from" << PredBB->getName() << '\n'); 280 if (isa<UnreachableInst>(PredBBTerminator)) { 281 assert(PredVPSuccessors.size() == 1 && 282 "Predecessor ending w/o branch must have single successor."); 283 DebugLoc DL = PredBBTerminator->getDebugLoc(); 284 PredBBTerminator->eraseFromParent(); 285 auto *Br = BranchInst::Create(NewBB, PredBB); 286 Br->setDebugLoc(DL); 287 } else { 288 assert(PredVPSuccessors.size() == 2 && 289 "Predecessor ending with branch must have two successors."); 290 unsigned idx = PredVPSuccessors.front() == this ? 0 : 1; 291 assert(!PredBBTerminator->getSuccessor(idx) && 292 "Trying to reset an existing successor block."); 293 PredBBTerminator->setSuccessor(idx, NewBB); 294 } 295 } 296 return NewBB; 297 } 298 299 void VPBasicBlock::execute(VPTransformState *State) { 300 bool Replica = State->Instance && !State->Instance->isFirstIteration(); 301 VPBasicBlock *PrevVPBB = State->CFG.PrevVPBB; 302 VPBlockBase *SingleHPred = nullptr; 303 BasicBlock *NewBB = State->CFG.PrevBB; // Reuse it if possible. 304 305 // 1. Create an IR basic block, or reuse the last one if possible. 306 // The last IR basic block is reused, as an optimization, in three cases: 307 // A. the first VPBB reuses the loop header BB - when PrevVPBB is null; 308 // B. when the current VPBB has a single (hierarchical) predecessor which 309 // is PrevVPBB and the latter has a single (hierarchical) successor; and 310 // C. 
when the current VPBB is an entry of a region replica - where PrevVPBB 311 // is the exit of this region from a previous instance, or the predecessor 312 // of this region. 313 if (PrevVPBB && /* A */ 314 !((SingleHPred = getSingleHierarchicalPredecessor()) && 315 SingleHPred->getExitBasicBlock() == PrevVPBB && 316 PrevVPBB->getSingleHierarchicalSuccessor()) && /* B */ 317 !(Replica && getPredecessors().empty())) { /* C */ 318 NewBB = createEmptyBasicBlock(State->CFG); 319 State->Builder.SetInsertPoint(NewBB); 320 // Temporarily terminate with unreachable until CFG is rewired. 321 UnreachableInst *Terminator = State->Builder.CreateUnreachable(); 322 // Register NewBB in its loop. In innermost loops its the same for all BB's. 323 State->CurrentVectorLoop->addBasicBlockToLoop(NewBB, *State->LI); 324 State->Builder.SetInsertPoint(Terminator); 325 State->CFG.PrevBB = NewBB; 326 } else { 327 // If the current VPBB is re-using the header block from skeleton creation, 328 // move it to the new vector loop. 329 VPBasicBlock *HeaderVPBB = 330 getPlan()->getVectorLoopRegion()->getEntryBasicBlock(); 331 if (EnableVPlanNativePath) 332 HeaderVPBB = cast<VPBasicBlock>(HeaderVPBB->getSingleSuccessor()); 333 if (this == HeaderVPBB) { 334 assert(State->CurrentVectorLoop); 335 State->LI->removeBlock(State->CFG.PrevBB); 336 State->CurrentVectorLoop->addBasicBlockToLoop(State->CFG.PrevBB, 337 *State->LI); 338 } 339 } 340 341 // 2. Fill the IR basic block with IR instructions. 342 LLVM_DEBUG(dbgs() << "LV: vectorizing VPBB:" << getName() 343 << " in BB:" << NewBB->getName() << '\n'); 344 345 State->CFG.VPBB2IRBB[this] = NewBB; 346 State->CFG.PrevVPBB = this; 347 348 for (VPRecipeBase &Recipe : Recipes) 349 Recipe.execute(*State); 350 351 VPValue *CBV; 352 if (EnableVPlanNativePath && (CBV = getCondBit())) { 353 assert(CBV->getUnderlyingValue() && 354 "Unexpected null underlying value for condition bit"); 355 356 // Condition bit value in a VPBasicBlock is used as the branch selector. 
In 357 // the VPlan-native path case, since all branches are uniform we generate a 358 // branch instruction using the condition value from vector lane 0 and dummy 359 // successors. The successors are fixed later when the successor blocks are 360 // visited. 361 Value *NewCond = State->get(CBV, {0, 0}); 362 363 // Replace the temporary unreachable terminator with the new conditional 364 // branch. 365 auto *CurrentTerminator = NewBB->getTerminator(); 366 assert(isa<UnreachableInst>(CurrentTerminator) && 367 "Expected to replace unreachable terminator with conditional " 368 "branch."); 369 auto *CondBr = BranchInst::Create(NewBB, nullptr, NewCond); 370 CondBr->setSuccessor(0, nullptr); 371 ReplaceInstWithInst(CurrentTerminator, CondBr); 372 } 373 374 LLVM_DEBUG(dbgs() << "LV: filled BB:" << *NewBB); 375 } 376 377 void VPBasicBlock::dropAllReferences(VPValue *NewValue) { 378 for (VPRecipeBase &R : Recipes) { 379 for (auto *Def : R.definedValues()) 380 Def->replaceAllUsesWith(NewValue); 381 382 for (unsigned I = 0, E = R.getNumOperands(); I != E; I++) 383 R.setOperand(I, NewValue); 384 } 385 } 386 387 VPBasicBlock *VPBasicBlock::splitAt(iterator SplitAt) { 388 assert((SplitAt == end() || SplitAt->getParent() == this) && 389 "can only split at a position in the same block"); 390 391 SmallVector<VPBlockBase *, 2> Succs(successors()); 392 // First, disconnect the current block from its successors. 393 for (VPBlockBase *Succ : Succs) 394 VPBlockUtils::disconnectBlocks(this, Succ); 395 396 // Create new empty block after the block to split. 397 auto *SplitBlock = new VPBasicBlock(getName() + ".split"); 398 VPBlockUtils::insertBlockAfter(SplitBlock, this); 399 400 // Add successors for block to split to new block. 401 for (VPBlockBase *Succ : Succs) 402 VPBlockUtils::connectBlocks(SplitBlock, Succ); 403 404 // Finally, move the recipes starting at SplitAt to new block. 
  // make_early_inc_range keeps iteration valid while each recipe is unlinked
  // and re-linked into SplitBlock.
  for (VPRecipeBase &ToMove :
       make_early_inc_range(make_range(SplitAt, this->end())))
    ToMove.moveBefore(*SplitBlock, SplitBlock->end());

  return SplitBlock;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Print this block's successor list (by name) on one line, or a
// "No successors" marker.
void VPBlockBase::printSuccessors(raw_ostream &O, const Twine &Indent) const {
  if (getSuccessors().empty()) {
    O << Indent << "No successors\n";
  } else {
    O << Indent << "Successor(s): ";
    ListSeparator LS;
    for (auto *Succ : getSuccessors())
      O << LS << Succ->getName();
    O << '\n';
  }
}

// Print the block header, optional predicate, all recipes, successors and
// optional condition bit in textual VPlan form.
void VPBasicBlock::print(raw_ostream &O, const Twine &Indent,
                         VPSlotTracker &SlotTracker) const {
  O << Indent << getName() << ":\n";
  if (const VPValue *Pred = getPredicate()) {
    O << Indent << "BlockPredicate:";
    Pred->printAsOperand(O, SlotTracker);
    if (const auto *PredInst = dyn_cast<VPInstruction>(Pred))
      O << " (" << PredInst->getParent()->getName() << ")";
    O << '\n';
  }

  auto RecipeIndent = Indent + "  ";
  for (const VPRecipeBase &Recipe : *this) {
    Recipe.print(O, RecipeIndent, SlotTracker);
    O << '\n';
  }

  printSuccessors(O, Indent);

  if (const VPValue *CBV = getCondBit()) {
    O << Indent << "CondBit: ";
    CBV->printAsOperand(O, SlotTracker);
    if (const auto *CBI = dyn_cast<VPInstruction>(CBV))
      O << " (" << CBI->getParent()->getName() << ")";
    O << '\n';
  }
}
#endif

void VPRegionBlock::dropAllReferences(VPValue *NewValue) {
  for (VPBlockBase *Block : depth_first(Entry))
    // Drop all references in VPBasicBlocks and replace all uses with
    // DummyValue.
    Block->dropAllReferences(NewValue);
}

// Generate IR for this region. Non-replicating regions execute each block
// once (registering a new Loop in LoopInfo); replicating regions execute
// their blocks once per (part, lane) pair.
void VPRegionBlock::execute(VPTransformState *State) {
  ReversePostOrderTraversal<VPBlockBase *> RPOT(Entry);

  if (!isReplicator()) {
    // Create and register the new vector loop.
    Loop *PrevLoop = State->CurrentVectorLoop;
    State->CurrentVectorLoop = State->LI->AllocateLoop();
    Loop *ParentLoop = State->LI->getLoopFor(State->CFG.VectorPreHeader);

    // Insert the new loop into the loop nest and register the new basic blocks
    // before calling any utilities such as SCEV that require valid LoopInfo.
    if (ParentLoop)
      ParentLoop->addChildLoop(State->CurrentVectorLoop);
    else
      State->LI->addTopLevelLoop(State->CurrentVectorLoop);

    // Visit the VPBlocks connected to "this", starting from it.
    for (VPBlockBase *Block : RPOT) {
      if (EnableVPlanNativePath) {
        // The inner loop vectorization path does not represent loop preheader
        // and exit blocks as part of the VPlan. In the VPlan-native path, skip
        // vectorizing loop preheader block. In future, we may replace this
        // check with the check for loop preheader.
        if (Block->getNumPredecessors() == 0)
          continue;

        // Skip vectorizing loop exit block. In future, we may replace this
        // check with the check for loop exit.
        if (Block->getNumSuccessors() == 0)
          continue;
      }

      LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
      Block->execute(State);
    }

    // Restore the enclosing loop for the caller.
    State->CurrentVectorLoop = PrevLoop;
    return;
  }

  assert(!State->Instance && "Replicating a Region with non-null instance.");

  // Enter replicating mode.
  State->Instance = VPIteration(0, 0);

  for (unsigned Part = 0, UF = State->UF; Part < UF; ++Part) {
    State->Instance->Part = Part;
    assert(!State->VF.isScalable() && "VF is assumed to be non scalable.");
    for (unsigned Lane = 0, VF = State->VF.getKnownMinValue(); Lane < VF;
         ++Lane) {
      State->Instance->Lane = VPLane(Lane, VPLane::Kind::First);
      // Visit the VPBlocks connected to \p this, starting from it.
      for (VPBlockBase *Block : RPOT) {
        LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
        Block->execute(State);
      }
    }
  }

  // Exit replicating mode.
  State->Instance.reset();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Print the region header (replicator marker + name), the nested blocks in
// depth-first order, and the region's successors.
void VPRegionBlock::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << (isReplicator() ? "<xVFxUF> " : "<x1> ") << getName() << ": {";
  auto NewIndent = Indent + "  ";
  for (auto *BlockBase : depth_first(Entry)) {
    O << '\n';
    BlockBase->print(O, NewIndent, SlotTracker);
  }
  O << Indent << "}\n";

  printSuccessors(O, Indent);
}
#endif

// Conservative memory-write query: known read-only recipe kinds return
// false (asserted against the underlying IR in debug builds); anything not
// listed defaults to true.
bool VPRecipeBase::mayWriteToMemory() const {
  switch (getVPDefID()) {
  case VPWidenMemoryInstructionSC: {
    return cast<VPWidenMemoryInstructionRecipe>(this)->isStore();
  }
  case VPReplicateSC:
  case VPWidenCallSC:
    // Defer to the underlying scalar instruction.
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
        ->mayWriteToMemory();
  case VPBranchOnMaskSC:
    return false;
  case VPWidenIntOrFpInductionSC:
  case VPWidenCanonicalIVSC:
  case VPWidenPHISC:
  case VPBlendSC:
  case VPWidenSC:
  case VPWidenGEPSC:
  case VPReductionSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayWriteToMemory()) &&
           "underlying instruction may write to memory");
    return false;
  }
  default:
    return true;
  }
}

// Conservative memory-read query; mirrors mayWriteToMemory above.
bool VPRecipeBase::mayReadFromMemory() const {
  switch (getVPDefID()) {
  case VPWidenMemoryInstructionSC: {
    return !cast<VPWidenMemoryInstructionRecipe>(this)->isStore();
  }
  case VPReplicateSC:
  case VPWidenCallSC:
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
        ->mayReadFromMemory();
  case VPBranchOnMaskSC:
    return false;
  case VPWidenIntOrFpInductionSC:
  case VPWidenCanonicalIVSC:
  case VPWidenPHISC:
  case VPBlendSC:
  case VPWidenSC:
  case VPWidenGEPSC:
  case VPReductionSC:
  case VPWidenSelectSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayReadFromMemory()) &&
           "underlying instruction may read from memory");
    return false;
  }
  default:
    return true;
  }
}

// Conservative side-effect query; unknown recipe kinds default to true.
bool VPRecipeBase::mayHaveSideEffects() const {
  switch (getVPDefID()) {
  case VPBranchOnMaskSC:
    return false;
  case VPWidenIntOrFpInductionSC:
  case VPWidenPointerInductionSC:
  case VPWidenCanonicalIVSC:
  case VPWidenPHISC:
  case VPBlendSC:
  case VPWidenSC:
  case VPWidenGEPSC:
  case VPReductionSC:
  case VPWidenSelectSC:
  case VPScalarIVStepsSC: {
    const Instruction *I =
        dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
    (void)I;
    assert((!I || !I->mayHaveSideEffects()) &&
           "underlying instruction has side-effects");
    return false;
  }
  case VPReplicateSC: {
    auto *R = cast<VPReplicateRecipe>(this);
    return R->getUnderlyingInstr()->mayHaveSideEffects();
  }
  default:
    return true;
  }
}

// Link this (unparented) recipe into InsertPos's block, before InsertPos.
void VPRecipeBase::insertBefore(VPRecipeBase *InsertPos) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(InsertPos->getParent() &&
         "Insertion position not in any VPBasicBlock");
  Parent = InsertPos->getParent();
  Parent->getRecipeList().insert(InsertPos->getIterator(), this);
}

// Link this (unparented) recipe into \p BB at iterator \p I.
void VPRecipeBase::insertBefore(VPBasicBlock &BB,
                                iplist<VPRecipeBase>::iterator I) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(I == BB.end() || I->getParent() == &BB);
  Parent = &BB;
  BB.getRecipeList().insert(I, this);
}

// Link this (unparented) recipe into InsertPos's block, after InsertPos.
void VPRecipeBase::insertAfter(VPRecipeBase *InsertPos) {
  assert(!Parent && "Recipe already in some VPBasicBlock");
  assert(InsertPos->getParent() &&
         "Insertion position not in any VPBasicBlock");
  Parent = InsertPos->getParent();
  Parent->getRecipeList().insertAfter(InsertPos->getIterator(), this);
}

// Unlink this recipe from its block without destroying it.
void VPRecipeBase::removeFromParent() {
  assert(getParent() && "Recipe not in any VPBasicBlock");
  getParent()->getRecipeList().remove(getIterator());
  Parent = nullptr;
}

// Unlink and destroy this recipe; returns the iterator following it.
iplist<VPRecipeBase>::iterator VPRecipeBase::eraseFromParent() {
  assert(getParent() && "Recipe not in any VPBasicBlock");
  return getParent()->getRecipeList().erase(getIterator());
}

void VPRecipeBase::moveAfter(VPRecipeBase *InsertPos) {
  removeFromParent();
  insertAfter(InsertPos);
}

void VPRecipeBase::moveBefore(VPBasicBlock &BB,
                              iplist<VPRecipeBase>::iterator I) {
  removeFromParent();
  insertBefore(BB, I);
}

// Emit the IR for one unroll part of this VPInstruction, dispatching on the
// (possibly VPlan-specific) opcode.
void VPInstruction::generateInstruction(VPTransformState &State,
                                        unsigned Part) {
  IRBuilderBase &Builder = State.Builder;
  Builder.SetCurrentDebugLocation(DL);

  // All LLVM binary ops share one code path.
  if (Instruction::isBinaryOp(getOpcode())) {
    Value *A = State.get(getOperand(0), Part);
    Value *B = State.get(getOperand(1), Part);
    Value *V = Builder.CreateBinOp((Instruction::BinaryOps)getOpcode(), A, B);
    State.set(this, V, Part);
    return;
  }

  switch (getOpcode()) {
  case VPInstruction::Not: {
    Value *A = State.get(getOperand(0), Part);
    Value *V = Builder.CreateNot(A);
    State.set(this, V, Part);
    break;
  }
  case VPInstruction::ICmpULE: {
    Value *IV = State.get(getOperand(0), Part);
    Value *TC = State.get(getOperand(1), Part);
    Value *V = Builder.CreateICmpULE(IV, TC);
    State.set(this, V, Part);
    break;
  }
  case Instruction::Select: {
    Value *Cond = State.get(getOperand(0), Part);
    Value *Op1 = State.get(getOperand(1), Part);
    Value *Op2 = State.get(getOperand(2), Part);
    Value *V = Builder.CreateSelect(Cond, Op1, Op2);
    State.set(this, V, Part);
    break;
  }
  case VPInstruction::ActiveLaneMask: {
    // Get first lane of vector induction variable.
    Value *VIVElem0 = State.get(getOperand(0), VPIteration(Part, 0));
    // Get the original loop tripcount.
    Value *ScalarTC = State.get(getOperand(1), Part);

    auto *Int1Ty = Type::getInt1Ty(Builder.getContext());
    auto *PredTy = VectorType::get(Int1Ty, State.VF);
    Instruction *Call = Builder.CreateIntrinsic(
        Intrinsic::get_active_lane_mask, {PredTy, ScalarTC->getType()},
        {VIVElem0, ScalarTC}, nullptr, "active.lane.mask");
    State.set(this, Call, Part);
    break;
  }
  case VPInstruction::FirstOrderRecurrenceSplice: {
    // Generate code to combine the previous and current values in vector v3.
    //
    //   vector.ph:
    //     v_init = vector(..., ..., ..., a[-1])
    //     br vector.body
    //
    //   vector.body
    //     i = phi [0, vector.ph], [i+4, vector.body]
    //     v1 = phi [v_init, vector.ph], [v2, vector.body]
    //     v2 = a[i, i+1, i+2, i+3];
    //     v3 = vector(v1(3), v2(0, 1, 2))

    // For the first part, use the recurrence phi (v1), otherwise v2.
    auto *V1 = State.get(getOperand(0), 0);
    Value *PartMinus1 = Part == 0 ? V1 : State.get(getOperand(1), Part - 1);
    if (!PartMinus1->getType()->isVectorTy()) {
      // Scalar (VF == 1) case: the previous part's value is the splice.
      State.set(this, PartMinus1, Part);
    } else {
      Value *V2 = State.get(getOperand(1), Part);
      State.set(this, Builder.CreateVectorSplice(PartMinus1, V2, -1), Part);
    }
    break;
  }

  case VPInstruction::CanonicalIVIncrement:
  case VPInstruction::CanonicalIVIncrementNUW: {
    Value *Next = nullptr;
    if (Part == 0) {
      bool IsNUW = getOpcode() == VPInstruction::CanonicalIVIncrementNUW;
      auto *Phi = State.get(getOperand(0), 0);
      // The loop step is equal to the vectorization factor (num of SIMD
      // elements) times the unroll factor (num of SIMD instructions).
      Value *Step =
          createStepForVF(Builder, Phi->getType(), State.VF, State.UF);
      Next = Builder.CreateAdd(Phi, Step, "index.next", IsNUW, false);
    } else {
      // All parts share the part-0 increment.
      Next = State.get(this, 0);
    }

    State.set(this, Next, Part);
    break;
  }
  case VPInstruction::BranchOnCount: {
    // The latch branch is emitted once, for part 0 only.
    if (Part != 0)
      break;
    // First create the compare.
    Value *IV = State.get(getOperand(0), Part);
    Value *TC = State.get(getOperand(1), Part);
    Value *Cond = Builder.CreateICmpEQ(IV, TC);

    // Now create the branch.
    auto *Plan = getParent()->getPlan();
    VPRegionBlock *TopRegion = Plan->getVectorLoopRegion();
    VPBasicBlock *Header = TopRegion->getEntry()->getEntryBasicBlock();
    if (Header->empty()) {
      assert(EnableVPlanNativePath &&
             "empty entry block only expected in VPlanNativePath");
      Header = cast<VPBasicBlock>(Header->getSingleSuccessor());
    }
    // TODO: Once the exit block is modeled in VPlan, use it instead of going
    // through State.CFG.ExitBB.
    BasicBlock *Exit = State.CFG.ExitBB;

    // Replace the temporary unreachable terminator with the real latch
    // branch.
    Builder.CreateCondBr(Cond, Exit, State.CFG.VPBB2IRBB[Header]);
    Builder.GetInsertBlock()->getTerminator()->eraseFromParent();
    break;
  }
  default:
    llvm_unreachable("Unsupported opcode for instruction");
  }
}

// Emit this instruction for every unroll part with the recorded fast-math
// flags scoped to the builder.
void VPInstruction::execute(VPTransformState &State) {
  assert(!State.Instance && "VPInstruction executing an Instance");
  IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
  State.Builder.setFastMathFlags(FMF);
  for (unsigned Part = 0; Part < State.UF; ++Part)
    generateInstruction(State, Part);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPInstruction::dump() const {
  VPSlotTracker SlotTracker(getParent()->getPlan());
  print(dbgs(), "", SlotTracker);
}

// Print as "EMIT [<result> =] <mnemonic> <operands>[, !dbg <loc>]".
void VPInstruction::print(raw_ostream &O, const Twine &Indent,
                          VPSlotTracker &SlotTracker) const {
  O << Indent << "EMIT ";

  if (hasResult()) {
    printAsOperand(O, SlotTracker);
    O << " = ";
  }

  switch (getOpcode()) {
  case VPInstruction::Not:
    O << "not";
    break;
  case VPInstruction::ICmpULE:
    O << "icmp ule";
    break;
  case VPInstruction::SLPLoad:
    O << "combined load";
    break;
  case VPInstruction::SLPStore:
    O << "combined store";
    break;
  case VPInstruction::ActiveLaneMask:
    O << "active lane mask";
    break;
  case VPInstruction::FirstOrderRecurrenceSplice:
    O << "first-order splice";
    break;
  case VPInstruction::CanonicalIVIncrement:
    O << "VF * UF + ";
    break;
  case VPInstruction::CanonicalIVIncrementNUW:
    O << "VF * UF +(nuw) ";
    break;
  case VPInstruction::BranchOnCount:
    O << "branch-on-count ";
    break;
  default:
    // Plain LLVM opcodes print their standard name.
    O << Instruction::getOpcodeName(getOpcode());
  }

  O << FMF;

  for (const VPValue *Operand : operands()) {
    O << " ";
    Operand->printAsOperand(O, SlotTracker);
  }

  if (DL) {
    O << ", !dbg ";
    DL.print(O);
  }
}
#endif

// Record fast-math flags; only legal on FP opcodes.
void VPInstruction::setFastMathFlags(FastMathFlags FMFNew) {
  // Make sure the VPInstruction is a floating-point operation.
  assert((Opcode == Instruction::FAdd || Opcode == Instruction::FMul ||
          Opcode == Instruction::FNeg || Opcode == Instruction::FSub ||
          Opcode == Instruction::FDiv || Opcode == Instruction::FRem ||
          Opcode == Instruction::FCmp) &&
         "this op can't take fast-math flags");
  FMF = FMFNew;
}

// Seed the transform state with trip-count related values before executing
// the plan, and optionally redirect the canonical IV's start value (used
// when vectorizing an epilogue loop).
void VPlan::prepareToExecute(Value *TripCountV, Value *VectorTripCountV,
                             Value *CanonicalIVStartValue,
                             VPTransformState &State) {
  // Check if the trip count is needed, and if so build it.
  if (TripCount && TripCount->getNumUsers()) {
    for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
      State.set(TripCount, TripCountV, Part);
  }

  // Check if the backedge taken count is needed, and if so build it.
  if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
    IRBuilder<> Builder(State.CFG.VectorPreHeader->getTerminator());
    auto *TCMO = Builder.CreateSub(TripCountV,
                                   ConstantInt::get(TripCountV->getType(), 1),
                                   "trip.count.minus.1");
    auto VF = State.VF;
    // Broadcast to a vector unless the plan runs at VF = 1.
    Value *VTCMO =
        VF.isScalar() ? TCMO : Builder.CreateVectorSplat(VF, TCMO, "broadcast");
    for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
      State.set(BackedgeTakenCount, VTCMO, Part);
  }

  for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part)
    State.set(&VectorTripCount, VectorTripCountV, Part);

  // When vectorizing the epilogue loop, the canonical induction start value
  // needs to be changed from zero to the value after the main vector loop.
  if (CanonicalIVStartValue) {
    VPValue *VPV = new VPValue(CanonicalIVStartValue);
    addExternalDef(VPV);
    auto *IV = getCanonicalIV();
    assert(all_of(IV->users(),
                  [](const VPUser *U) {
                    if (isa<VPScalarIVStepsRecipe>(U))
                      return true;
                    auto *VPI = cast<VPInstruction>(U);
                    return VPI->getOpcode() ==
                               VPInstruction::CanonicalIVIncrement ||
                           VPI->getOpcode() ==
                               VPInstruction::CanonicalIVIncrementNUW;
                  }) &&
           "the canonical IV should only be used by its increments or "
           "ScalarIVSteps when "
           "resetting the start value");
    IV->setOperand(0, VPV);
  }
}

/// Generate the code inside the body of the vectorized loop. Assumes a single
/// LoopVectorBody basic-block was created for this. Introduce additional
/// basic-blocks as needed, and fill them all.
void VPlan::execute(VPTransformState *State) {
  // Set the reverse mapping from VPValues to Values for code generation.
  for (auto &Entry : Value2VPValue)
    State->VPValue2Value[Entry.second] = Entry.first;

  // Initialize CFG state.
  State->CFG.PrevVPBB = nullptr;
  BasicBlock *VectorHeaderBB = State->CFG.VectorPreHeader->getSingleSuccessor();
  State->CFG.PrevBB = VectorHeaderBB;
  State->CFG.ExitBB = VectorHeaderBB->getSingleSuccessor();
  State->CurrentVectorLoop = State->LI->getLoopFor(VectorHeaderBB);

  // Remove the edge between Header and Latch to allow other connections.
  // Temporarily terminate with unreachable until CFG is rewired.
  // Note: this asserts the generated code's assumption that
  // getFirstInsertionPt() can be dereferenced into an Instruction.
  VectorHeaderBB->getTerminator()->eraseFromParent();
  State->Builder.SetInsertPoint(VectorHeaderBB);
  UnreachableInst *Terminator = State->Builder.CreateUnreachable();
  State->Builder.SetInsertPoint(Terminator);

  // Generate code in loop body.
951 for (VPBlockBase *Block : depth_first(Entry)) 952 Block->execute(State); 953 954 // Setup branch terminator successors for VPBBs in VPBBsToFix based on 955 // VPBB's successors. 956 for (auto VPBB : State->CFG.VPBBsToFix) { 957 assert(EnableVPlanNativePath && 958 "Unexpected VPBBsToFix in non VPlan-native path"); 959 BasicBlock *BB = State->CFG.VPBB2IRBB[VPBB]; 960 assert(BB && "Unexpected null basic block for VPBB"); 961 962 unsigned Idx = 0; 963 auto *BBTerminator = BB->getTerminator(); 964 965 for (VPBlockBase *SuccVPBlock : VPBB->getHierarchicalSuccessors()) { 966 VPBasicBlock *SuccVPBB = SuccVPBlock->getEntryBasicBlock(); 967 BBTerminator->setSuccessor(Idx, State->CFG.VPBB2IRBB[SuccVPBB]); 968 ++Idx; 969 } 970 } 971 972 BasicBlock *VectorLatchBB = State->CFG.PrevBB; 973 974 // Fix the latch value of canonical, reduction and first-order recurrences 975 // phis in the vector loop. 976 VPBasicBlock *Header = getVectorLoopRegion()->getEntryBasicBlock(); 977 if (Header->empty()) { 978 assert(EnableVPlanNativePath); 979 Header = cast<VPBasicBlock>(Header->getSingleSuccessor()); 980 } 981 for (VPRecipeBase &R : Header->phis()) { 982 // Skip phi-like recipes that generate their backedege values themselves. 983 if (isa<VPWidenPHIRecipe>(&R)) 984 continue; 985 986 if (isa<VPWidenPointerInductionRecipe>(&R) || 987 isa<VPWidenIntOrFpInductionRecipe>(&R)) { 988 PHINode *Phi = nullptr; 989 if (isa<VPWidenIntOrFpInductionRecipe>(&R)) { 990 Phi = cast<PHINode>(State->get(R.getVPSingleValue(), 0)); 991 } else { 992 auto *WidenPhi = cast<VPWidenPointerInductionRecipe>(&R); 993 // TODO: Split off the case that all users of a pointer phi are scalar 994 // from the VPWidenPointerInductionRecipe. 
995 if (all_of(WidenPhi->users(), [WidenPhi](const VPUser *U) { 996 return cast<VPRecipeBase>(U)->usesScalars(WidenPhi); 997 })) 998 continue; 999 1000 auto *GEP = cast<GetElementPtrInst>(State->get(WidenPhi, 0)); 1001 Phi = cast<PHINode>(GEP->getPointerOperand()); 1002 } 1003 1004 Phi->setIncomingBlock(1, VectorLatchBB); 1005 1006 // Move the last step to the end of the latch block. This ensures 1007 // consistent placement of all induction updates. 1008 Instruction *Inc = cast<Instruction>(Phi->getIncomingValue(1)); 1009 Inc->moveBefore(VectorLatchBB->getTerminator()->getPrevNode()); 1010 continue; 1011 } 1012 1013 auto *PhiR = cast<VPHeaderPHIRecipe>(&R); 1014 // For canonical IV, first-order recurrences and in-order reduction phis, 1015 // only a single part is generated, which provides the last part from the 1016 // previous iteration. For non-ordered reductions all UF parts are 1017 // generated. 1018 bool SinglePartNeeded = isa<VPCanonicalIVPHIRecipe>(PhiR) || 1019 isa<VPFirstOrderRecurrencePHIRecipe>(PhiR) || 1020 cast<VPReductionPHIRecipe>(PhiR)->isOrdered(); 1021 unsigned LastPartForNewPhi = SinglePartNeeded ? 1 : State->UF; 1022 1023 for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) { 1024 Value *Phi = State->get(PhiR, Part); 1025 Value *Val = State->get(PhiR->getBackedgeValue(), 1026 SinglePartNeeded ? State->UF - 1 : Part); 1027 cast<PHINode>(Phi)->addIncoming(Val, VectorLatchBB); 1028 } 1029 } 1030 1031 // We do not attempt to preserve DT for outer loop vectorization currently. 
1032 if (!EnableVPlanNativePath) 1033 updateDominatorTree(State->DT, VectorHeaderBB, VectorLatchBB, 1034 State->CFG.ExitBB); 1035 } 1036 1037 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1038 LLVM_DUMP_METHOD 1039 void VPlan::print(raw_ostream &O) const { 1040 VPSlotTracker SlotTracker(this); 1041 1042 O << "VPlan '" << Name << "' {"; 1043 1044 if (VectorTripCount.getNumUsers() > 0) { 1045 O << "\nLive-in "; 1046 VectorTripCount.printAsOperand(O, SlotTracker); 1047 O << " = vector-trip-count\n"; 1048 } 1049 1050 if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) { 1051 O << "\nLive-in "; 1052 BackedgeTakenCount->printAsOperand(O, SlotTracker); 1053 O << " = backedge-taken count\n"; 1054 } 1055 1056 for (const VPBlockBase *Block : depth_first(getEntry())) { 1057 O << '\n'; 1058 Block->print(O, "", SlotTracker); 1059 } 1060 O << "}\n"; 1061 } 1062 1063 LLVM_DUMP_METHOD 1064 void VPlan::printDOT(raw_ostream &O) const { 1065 VPlanPrinter Printer(O, *this); 1066 Printer.dump(); 1067 } 1068 1069 LLVM_DUMP_METHOD 1070 void VPlan::dump() const { print(dbgs()); } 1071 #endif 1072 1073 void VPlan::updateDominatorTree(DominatorTree *DT, BasicBlock *LoopHeaderBB, 1074 BasicBlock *LoopLatchBB, 1075 BasicBlock *LoopExitBB) { 1076 // The vector body may be more than a single basic-block by this point. 1077 // Update the dominator tree information inside the vector body by propagating 1078 // it from header to latch, expecting only triangular control-flow, if any. 1079 BasicBlock *PostDomSucc = nullptr; 1080 for (auto *BB = LoopHeaderBB; BB != LoopLatchBB; BB = PostDomSucc) { 1081 // Get the list of successors of this block. 
1082 std::vector<BasicBlock *> Succs(succ_begin(BB), succ_end(BB)); 1083 assert(Succs.size() <= 2 && 1084 "Basic block in vector loop has more than 2 successors."); 1085 PostDomSucc = Succs[0]; 1086 if (Succs.size() == 1) { 1087 assert(PostDomSucc->getSinglePredecessor() && 1088 "PostDom successor has more than one predecessor."); 1089 DT->addNewBlock(PostDomSucc, BB); 1090 continue; 1091 } 1092 BasicBlock *InterimSucc = Succs[1]; 1093 if (PostDomSucc->getSingleSuccessor() == InterimSucc) { 1094 PostDomSucc = Succs[1]; 1095 InterimSucc = Succs[0]; 1096 } 1097 assert(InterimSucc->getSingleSuccessor() == PostDomSucc && 1098 "One successor of a basic block does not lead to the other."); 1099 assert(InterimSucc->getSinglePredecessor() && 1100 "Interim successor has more than one predecessor."); 1101 assert(PostDomSucc->hasNPredecessors(2) && 1102 "PostDom successor has more than two predecessors."); 1103 DT->addNewBlock(InterimSucc, BB); 1104 DT->addNewBlock(PostDomSucc, BB); 1105 } 1106 // Latch block is a new dominator for the loop exit. 1107 DT->changeImmediateDominator(LoopExitBB, LoopLatchBB); 1108 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 1109 } 1110 1111 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1112 Twine VPlanPrinter::getUID(const VPBlockBase *Block) { 1113 return (isa<VPRegionBlock>(Block) ? 
"cluster_N" : "N") + 1114 Twine(getOrCreateBID(Block)); 1115 } 1116 1117 Twine VPlanPrinter::getOrCreateName(const VPBlockBase *Block) { 1118 const std::string &Name = Block->getName(); 1119 if (!Name.empty()) 1120 return Name; 1121 return "VPB" + Twine(getOrCreateBID(Block)); 1122 } 1123 1124 void VPlanPrinter::dump() { 1125 Depth = 1; 1126 bumpIndent(0); 1127 OS << "digraph VPlan {\n"; 1128 OS << "graph [labelloc=t, fontsize=30; label=\"Vectorization Plan"; 1129 if (!Plan.getName().empty()) 1130 OS << "\\n" << DOT::EscapeString(Plan.getName()); 1131 if (Plan.BackedgeTakenCount) { 1132 OS << ", where:\\n"; 1133 Plan.BackedgeTakenCount->print(OS, SlotTracker); 1134 OS << " := BackedgeTakenCount"; 1135 } 1136 OS << "\"]\n"; 1137 OS << "node [shape=rect, fontname=Courier, fontsize=30]\n"; 1138 OS << "edge [fontname=Courier, fontsize=30]\n"; 1139 OS << "compound=true\n"; 1140 1141 for (const VPBlockBase *Block : depth_first(Plan.getEntry())) 1142 dumpBlock(Block); 1143 1144 OS << "}\n"; 1145 } 1146 1147 void VPlanPrinter::dumpBlock(const VPBlockBase *Block) { 1148 if (const VPBasicBlock *BasicBlock = dyn_cast<VPBasicBlock>(Block)) 1149 dumpBasicBlock(BasicBlock); 1150 else if (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block)) 1151 dumpRegion(Region); 1152 else 1153 llvm_unreachable("Unsupported kind of VPBlock."); 1154 } 1155 1156 void VPlanPrinter::drawEdge(const VPBlockBase *From, const VPBlockBase *To, 1157 bool Hidden, const Twine &Label) { 1158 // Due to "dot" we print an edge between two regions as an edge between the 1159 // exit basic block and the entry basic of the respective regions. 
1160 const VPBlockBase *Tail = From->getExitBasicBlock(); 1161 const VPBlockBase *Head = To->getEntryBasicBlock(); 1162 OS << Indent << getUID(Tail) << " -> " << getUID(Head); 1163 OS << " [ label=\"" << Label << '\"'; 1164 if (Tail != From) 1165 OS << " ltail=" << getUID(From); 1166 if (Head != To) 1167 OS << " lhead=" << getUID(To); 1168 if (Hidden) 1169 OS << "; splines=none"; 1170 OS << "]\n"; 1171 } 1172 1173 void VPlanPrinter::dumpEdges(const VPBlockBase *Block) { 1174 auto &Successors = Block->getSuccessors(); 1175 if (Successors.size() == 1) 1176 drawEdge(Block, Successors.front(), false, ""); 1177 else if (Successors.size() == 2) { 1178 drawEdge(Block, Successors.front(), false, "T"); 1179 drawEdge(Block, Successors.back(), false, "F"); 1180 } else { 1181 unsigned SuccessorNumber = 0; 1182 for (auto *Successor : Successors) 1183 drawEdge(Block, Successor, false, Twine(SuccessorNumber++)); 1184 } 1185 } 1186 1187 void VPlanPrinter::dumpBasicBlock(const VPBasicBlock *BasicBlock) { 1188 // Implement dot-formatted dump by performing plain-text dump into the 1189 // temporary storage followed by some post-processing. 1190 OS << Indent << getUID(BasicBlock) << " [label =\n"; 1191 bumpIndent(1); 1192 std::string Str; 1193 raw_string_ostream SS(Str); 1194 // Use no indentation as we need to wrap the lines into quotes ourselves. 1195 BasicBlock->print(SS, "", SlotTracker); 1196 1197 // We need to process each line of the output separately, so split 1198 // single-string plain-text dump. 1199 SmallVector<StringRef, 0> Lines; 1200 StringRef(Str).rtrim('\n').split(Lines, "\n"); 1201 1202 auto EmitLine = [&](StringRef Line, StringRef Suffix) { 1203 OS << Indent << '"' << DOT::EscapeString(Line.str()) << "\\l\"" << Suffix; 1204 }; 1205 1206 // Don't need the "+" after the last line. 
1207 for (auto Line : make_range(Lines.begin(), Lines.end() - 1)) 1208 EmitLine(Line, " +\n"); 1209 EmitLine(Lines.back(), "\n"); 1210 1211 bumpIndent(-1); 1212 OS << Indent << "]\n"; 1213 1214 dumpEdges(BasicBlock); 1215 } 1216 1217 void VPlanPrinter::dumpRegion(const VPRegionBlock *Region) { 1218 OS << Indent << "subgraph " << getUID(Region) << " {\n"; 1219 bumpIndent(1); 1220 OS << Indent << "fontname=Courier\n" 1221 << Indent << "label=\"" 1222 << DOT::EscapeString(Region->isReplicator() ? "<xVFxUF> " : "<x1> ") 1223 << DOT::EscapeString(Region->getName()) << "\"\n"; 1224 // Dump the blocks of the region. 1225 assert(Region->getEntry() && "Region contains no inner blocks."); 1226 for (const VPBlockBase *Block : depth_first(Region->getEntry())) 1227 dumpBlock(Block); 1228 bumpIndent(-1); 1229 OS << Indent << "}\n"; 1230 dumpEdges(Region); 1231 } 1232 1233 void VPlanIngredient::print(raw_ostream &O) const { 1234 if (auto *Inst = dyn_cast<Instruction>(V)) { 1235 if (!Inst->getType()->isVoidTy()) { 1236 Inst->printAsOperand(O, false); 1237 O << " = "; 1238 } 1239 O << Inst->getOpcodeName() << " "; 1240 unsigned E = Inst->getNumOperands(); 1241 if (E > 0) { 1242 Inst->getOperand(0)->printAsOperand(O, false); 1243 for (unsigned I = 1; I < E; ++I) 1244 Inst->getOperand(I)->printAsOperand(O << ", ", false); 1245 } 1246 } else // !Inst 1247 V->printAsOperand(O, false); 1248 } 1249 1250 void VPWidenCallRecipe::print(raw_ostream &O, const Twine &Indent, 1251 VPSlotTracker &SlotTracker) const { 1252 O << Indent << "WIDEN-CALL "; 1253 1254 auto *CI = cast<CallInst>(getUnderlyingInstr()); 1255 if (CI->getType()->isVoidTy()) 1256 O << "void "; 1257 else { 1258 printAsOperand(O, SlotTracker); 1259 O << " = "; 1260 } 1261 1262 O << "call @" << CI->getCalledFunction()->getName() << "("; 1263 printOperands(O, SlotTracker); 1264 O << ")"; 1265 } 1266 1267 void VPWidenSelectRecipe::print(raw_ostream &O, const Twine &Indent, 1268 VPSlotTracker &SlotTracker) const { 1269 O << Indent 
<< "WIDEN-SELECT "; 1270 printAsOperand(O, SlotTracker); 1271 O << " = select "; 1272 getOperand(0)->printAsOperand(O, SlotTracker); 1273 O << ", "; 1274 getOperand(1)->printAsOperand(O, SlotTracker); 1275 O << ", "; 1276 getOperand(2)->printAsOperand(O, SlotTracker); 1277 O << (InvariantCond ? " (condition is loop invariant)" : ""); 1278 } 1279 1280 void VPWidenRecipe::print(raw_ostream &O, const Twine &Indent, 1281 VPSlotTracker &SlotTracker) const { 1282 O << Indent << "WIDEN "; 1283 printAsOperand(O, SlotTracker); 1284 O << " = " << getUnderlyingInstr()->getOpcodeName() << " "; 1285 printOperands(O, SlotTracker); 1286 } 1287 1288 void VPWidenIntOrFpInductionRecipe::print(raw_ostream &O, const Twine &Indent, 1289 VPSlotTracker &SlotTracker) const { 1290 O << Indent << "WIDEN-INDUCTION"; 1291 if (getTruncInst()) { 1292 O << "\\l\""; 1293 O << " +\n" << Indent << "\" " << VPlanIngredient(IV) << "\\l\""; 1294 O << " +\n" << Indent << "\" "; 1295 getVPValue(0)->printAsOperand(O, SlotTracker); 1296 } else 1297 O << " " << VPlanIngredient(IV); 1298 } 1299 1300 void VPWidenPointerInductionRecipe::print(raw_ostream &O, const Twine &Indent, 1301 VPSlotTracker &SlotTracker) const { 1302 O << Indent << "EMIT "; 1303 printAsOperand(O, SlotTracker); 1304 O << " = WIDEN-POINTER-INDUCTION "; 1305 getStartValue()->printAsOperand(O, SlotTracker); 1306 O << ", " << *IndDesc.getStep(); 1307 } 1308 1309 #endif 1310 1311 bool VPWidenIntOrFpInductionRecipe::isCanonical() const { 1312 auto *StartC = dyn_cast<ConstantInt>(getStartValue()->getLiveInIRValue()); 1313 auto *StepC = dyn_cast<SCEVConstant>(getInductionDescriptor().getStep()); 1314 return StartC && StartC->isZero() && StepC && StepC->isOne(); 1315 } 1316 1317 VPCanonicalIVPHIRecipe *VPScalarIVStepsRecipe::getCanonicalIV() const { 1318 return cast<VPCanonicalIVPHIRecipe>(getOperand(0)); 1319 } 1320 1321 bool VPScalarIVStepsRecipe::isCanonical() const { 1322 auto *CanIV = getCanonicalIV(); 1323 // The start value of the 
steps-recipe must match the start value of the 1324 // canonical induction and it must step by 1. 1325 if (CanIV->getStartValue() != getStartValue()) 1326 return false; 1327 auto *StepVPV = getStepValue(); 1328 if (StepVPV->getDef()) 1329 return false; 1330 auto *StepC = dyn_cast_or_null<ConstantInt>(StepVPV->getLiveInIRValue()); 1331 return StepC && StepC->isOne(); 1332 } 1333 1334 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1335 void VPScalarIVStepsRecipe::print(raw_ostream &O, const Twine &Indent, 1336 VPSlotTracker &SlotTracker) const { 1337 O << Indent; 1338 printAsOperand(O, SlotTracker); 1339 O << Indent << "= SCALAR-STEPS "; 1340 printOperands(O, SlotTracker); 1341 } 1342 1343 void VPWidenGEPRecipe::print(raw_ostream &O, const Twine &Indent, 1344 VPSlotTracker &SlotTracker) const { 1345 O << Indent << "WIDEN-GEP "; 1346 O << (IsPtrLoopInvariant ? "Inv" : "Var"); 1347 size_t IndicesNumber = IsIndexLoopInvariant.size(); 1348 for (size_t I = 0; I < IndicesNumber; ++I) 1349 O << "[" << (IsIndexLoopInvariant[I] ? "Inv" : "Var") << "]"; 1350 1351 O << " "; 1352 printAsOperand(O, SlotTracker); 1353 O << " = getelementptr "; 1354 printOperands(O, SlotTracker); 1355 } 1356 1357 void VPWidenPHIRecipe::print(raw_ostream &O, const Twine &Indent, 1358 VPSlotTracker &SlotTracker) const { 1359 O << Indent << "WIDEN-PHI "; 1360 1361 auto *OriginalPhi = cast<PHINode>(getUnderlyingValue()); 1362 // Unless all incoming values are modeled in VPlan print the original PHI 1363 // directly. 1364 // TODO: Remove once all VPWidenPHIRecipe instances keep all relevant incoming 1365 // values as VPValues. 
1366 if (getNumOperands() != OriginalPhi->getNumOperands()) { 1367 O << VPlanIngredient(OriginalPhi); 1368 return; 1369 } 1370 1371 printAsOperand(O, SlotTracker); 1372 O << " = phi "; 1373 printOperands(O, SlotTracker); 1374 } 1375 1376 void VPBlendRecipe::print(raw_ostream &O, const Twine &Indent, 1377 VPSlotTracker &SlotTracker) const { 1378 O << Indent << "BLEND "; 1379 Phi->printAsOperand(O, false); 1380 O << " ="; 1381 if (getNumIncomingValues() == 1) { 1382 // Not a User of any mask: not really blending, this is a 1383 // single-predecessor phi. 1384 O << " "; 1385 getIncomingValue(0)->printAsOperand(O, SlotTracker); 1386 } else { 1387 for (unsigned I = 0, E = getNumIncomingValues(); I < E; ++I) { 1388 O << " "; 1389 getIncomingValue(I)->printAsOperand(O, SlotTracker); 1390 O << "/"; 1391 getMask(I)->printAsOperand(O, SlotTracker); 1392 } 1393 } 1394 } 1395 1396 void VPReductionRecipe::print(raw_ostream &O, const Twine &Indent, 1397 VPSlotTracker &SlotTracker) const { 1398 O << Indent << "REDUCE "; 1399 printAsOperand(O, SlotTracker); 1400 O << " = "; 1401 getChainOp()->printAsOperand(O, SlotTracker); 1402 O << " +"; 1403 if (isa<FPMathOperator>(getUnderlyingInstr())) 1404 O << getUnderlyingInstr()->getFastMathFlags(); 1405 O << " reduce." << Instruction::getOpcodeName(RdxDesc->getOpcode()) << " ("; 1406 getVecOp()->printAsOperand(O, SlotTracker); 1407 if (getCondOp()) { 1408 O << ", "; 1409 getCondOp()->printAsOperand(O, SlotTracker); 1410 } 1411 O << ")"; 1412 } 1413 1414 void VPReplicateRecipe::print(raw_ostream &O, const Twine &Indent, 1415 VPSlotTracker &SlotTracker) const { 1416 O << Indent << (IsUniform ? 
"CLONE " : "REPLICATE "); 1417 1418 if (!getUnderlyingInstr()->getType()->isVoidTy()) { 1419 printAsOperand(O, SlotTracker); 1420 O << " = "; 1421 } 1422 O << Instruction::getOpcodeName(getUnderlyingInstr()->getOpcode()) << " "; 1423 printOperands(O, SlotTracker); 1424 1425 if (AlsoPack) 1426 O << " (S->V)"; 1427 } 1428 1429 void VPPredInstPHIRecipe::print(raw_ostream &O, const Twine &Indent, 1430 VPSlotTracker &SlotTracker) const { 1431 O << Indent << "PHI-PREDICATED-INSTRUCTION "; 1432 printAsOperand(O, SlotTracker); 1433 O << " = "; 1434 printOperands(O, SlotTracker); 1435 } 1436 1437 void VPWidenMemoryInstructionRecipe::print(raw_ostream &O, const Twine &Indent, 1438 VPSlotTracker &SlotTracker) const { 1439 O << Indent << "WIDEN "; 1440 1441 if (!isStore()) { 1442 printAsOperand(O, SlotTracker); 1443 O << " = "; 1444 } 1445 O << Instruction::getOpcodeName(Ingredient.getOpcode()) << " "; 1446 1447 printOperands(O, SlotTracker); 1448 } 1449 #endif 1450 1451 void VPCanonicalIVPHIRecipe::execute(VPTransformState &State) { 1452 Value *Start = getStartValue()->getLiveInIRValue(); 1453 PHINode *EntryPart = PHINode::Create( 1454 Start->getType(), 2, "index", &*State.CFG.PrevBB->getFirstInsertionPt()); 1455 EntryPart->addIncoming(Start, State.CFG.VectorPreHeader); 1456 EntryPart->setDebugLoc(DL); 1457 for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part) 1458 State.set(this, EntryPart, Part); 1459 } 1460 1461 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1462 void VPCanonicalIVPHIRecipe::print(raw_ostream &O, const Twine &Indent, 1463 VPSlotTracker &SlotTracker) const { 1464 O << Indent << "EMIT "; 1465 printAsOperand(O, SlotTracker); 1466 O << " = CANONICAL-INDUCTION"; 1467 } 1468 #endif 1469 1470 void VPExpandSCEVRecipe::execute(VPTransformState &State) { 1471 assert(!State.Instance && "cannot be used in per-lane"); 1472 const DataLayout &DL = 1473 State.CFG.VectorPreHeader->getModule()->getDataLayout(); 1474 SCEVExpander Exp(SE, DL, "induction"); 1475 Value 
*Res = Exp.expandCodeFor(Expr, Expr->getType(), 1476 State.CFG.VectorPreHeader->getTerminator()); 1477 1478 for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part) 1479 State.set(this, Res, Part); 1480 } 1481 1482 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1483 void VPExpandSCEVRecipe::print(raw_ostream &O, const Twine &Indent, 1484 VPSlotTracker &SlotTracker) const { 1485 O << Indent << "EMIT "; 1486 getVPSingleValue()->printAsOperand(O, SlotTracker); 1487 O << " = EXPAND SCEV " << *Expr; 1488 } 1489 #endif 1490 1491 void VPWidenCanonicalIVRecipe::execute(VPTransformState &State) { 1492 Value *CanonicalIV = State.get(getOperand(0), 0); 1493 Type *STy = CanonicalIV->getType(); 1494 IRBuilder<> Builder(State.CFG.PrevBB->getTerminator()); 1495 ElementCount VF = State.VF; 1496 Value *VStart = VF.isScalar() 1497 ? CanonicalIV 1498 : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast"); 1499 for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part) { 1500 Value *VStep = createStepForVF(Builder, STy, VF, Part); 1501 if (VF.isVector()) { 1502 VStep = Builder.CreateVectorSplat(VF, VStep); 1503 VStep = Builder.CreateAdd(VStep, Builder.CreateStepVector(VStep->getType())); 1504 } 1505 Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv"); 1506 State.set(this, CanonicalVectorIV, Part); 1507 } 1508 } 1509 1510 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1511 void VPWidenCanonicalIVRecipe::print(raw_ostream &O, const Twine &Indent, 1512 VPSlotTracker &SlotTracker) const { 1513 O << Indent << "EMIT "; 1514 printAsOperand(O, SlotTracker); 1515 O << " = WIDEN-CANONICAL-INDUCTION "; 1516 printOperands(O, SlotTracker); 1517 } 1518 #endif 1519 1520 void VPFirstOrderRecurrencePHIRecipe::execute(VPTransformState &State) { 1521 auto &Builder = State.Builder; 1522 // Create a vector from the initial value. 1523 auto *VectorInit = getStartValue()->getLiveInIRValue(); 1524 1525 Type *VecTy = State.VF.isScalar() 1526 ? 
VectorInit->getType() 1527 : VectorType::get(VectorInit->getType(), State.VF); 1528 1529 if (State.VF.isVector()) { 1530 auto *IdxTy = Builder.getInt32Ty(); 1531 auto *One = ConstantInt::get(IdxTy, 1); 1532 IRBuilder<>::InsertPointGuard Guard(Builder); 1533 Builder.SetInsertPoint(State.CFG.VectorPreHeader->getTerminator()); 1534 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, State.VF); 1535 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 1536 VectorInit = Builder.CreateInsertElement( 1537 PoisonValue::get(VecTy), VectorInit, LastIdx, "vector.recur.init"); 1538 } 1539 1540 // Create a phi node for the new recurrence. 1541 PHINode *EntryPart = PHINode::Create( 1542 VecTy, 2, "vector.recur", &*State.CFG.PrevBB->getFirstInsertionPt()); 1543 EntryPart->addIncoming(VectorInit, State.CFG.VectorPreHeader); 1544 State.set(this, EntryPart, 0); 1545 } 1546 1547 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1548 void VPFirstOrderRecurrencePHIRecipe::print(raw_ostream &O, const Twine &Indent, 1549 VPSlotTracker &SlotTracker) const { 1550 O << Indent << "FIRST-ORDER-RECURRENCE-PHI "; 1551 printAsOperand(O, SlotTracker); 1552 O << " = phi "; 1553 printOperands(O, SlotTracker); 1554 } 1555 #endif 1556 1557 void VPReductionPHIRecipe::execute(VPTransformState &State) { 1558 PHINode *PN = cast<PHINode>(getUnderlyingValue()); 1559 auto &Builder = State.Builder; 1560 1561 // In order to support recurrences we need to be able to vectorize Phi nodes. 1562 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 1563 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 1564 // this value when we vectorize all of the instructions that use the PHI. 1565 bool ScalarPHI = State.VF.isScalar() || IsInLoop; 1566 Type *VecTy = 1567 ScalarPHI ? 
PN->getType() : VectorType::get(PN->getType(), State.VF); 1568 1569 BasicBlock *HeaderBB = State.CFG.PrevBB; 1570 assert(State.CurrentVectorLoop->getHeader() == HeaderBB && 1571 "recipe must be in the vector loop header"); 1572 unsigned LastPartForNewPhi = isOrdered() ? 1 : State.UF; 1573 for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) { 1574 Value *EntryPart = 1575 PHINode::Create(VecTy, 2, "vec.phi", &*HeaderBB->getFirstInsertionPt()); 1576 State.set(this, EntryPart, Part); 1577 } 1578 1579 // Reductions do not have to start at zero. They can start with 1580 // any loop invariant values. 1581 VPValue *StartVPV = getStartValue(); 1582 Value *StartV = StartVPV->getLiveInIRValue(); 1583 1584 Value *Iden = nullptr; 1585 RecurKind RK = RdxDesc.getRecurrenceKind(); 1586 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK) || 1587 RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) { 1588 // MinMax reduction have the start value as their identify. 1589 if (ScalarPHI) { 1590 Iden = StartV; 1591 } else { 1592 IRBuilderBase::InsertPointGuard IPBuilder(Builder); 1593 Builder.SetInsertPoint(State.CFG.VectorPreHeader->getTerminator()); 1594 StartV = Iden = 1595 Builder.CreateVectorSplat(State.VF, StartV, "minmax.ident"); 1596 } 1597 } else { 1598 Iden = RdxDesc.getRecurrenceIdentity(RK, VecTy->getScalarType(), 1599 RdxDesc.getFastMathFlags()); 1600 1601 if (!ScalarPHI) { 1602 Iden = Builder.CreateVectorSplat(State.VF, Iden); 1603 IRBuilderBase::InsertPointGuard IPBuilder(Builder); 1604 Builder.SetInsertPoint(State.CFG.VectorPreHeader->getTerminator()); 1605 Constant *Zero = Builder.getInt32(0); 1606 StartV = Builder.CreateInsertElement(Iden, StartV, Zero); 1607 } 1608 } 1609 1610 for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) { 1611 Value *EntryPart = State.get(this, Part); 1612 // Make sure to add the reduction start value only to the 1613 // first unroll part. 1614 Value *StartVal = (Part == 0) ? 
StartV : Iden; 1615 cast<PHINode>(EntryPart)->addIncoming(StartVal, State.CFG.VectorPreHeader); 1616 } 1617 } 1618 1619 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1620 void VPReductionPHIRecipe::print(raw_ostream &O, const Twine &Indent, 1621 VPSlotTracker &SlotTracker) const { 1622 O << Indent << "WIDEN-REDUCTION-PHI "; 1623 1624 printAsOperand(O, SlotTracker); 1625 O << " = phi "; 1626 printOperands(O, SlotTracker); 1627 } 1628 #endif 1629 1630 template void DomTreeBuilder::Calculate<VPDominatorTree>(VPDominatorTree &DT); 1631 1632 void VPValue::replaceAllUsesWith(VPValue *New) { 1633 for (unsigned J = 0; J < getNumUsers();) { 1634 VPUser *User = Users[J]; 1635 unsigned NumUsers = getNumUsers(); 1636 for (unsigned I = 0, E = User->getNumOperands(); I < E; ++I) 1637 if (User->getOperand(I) == this) 1638 User->setOperand(I, New); 1639 // If a user got removed after updating the current user, the next user to 1640 // update will be moved to the current position, so we only need to 1641 // increment the index if the number of users did not change. 
1642 if (NumUsers == getNumUsers()) 1643 J++; 1644 } 1645 } 1646 1647 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1648 void VPValue::printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const { 1649 if (const Value *UV = getUnderlyingValue()) { 1650 OS << "ir<"; 1651 UV->printAsOperand(OS, false); 1652 OS << ">"; 1653 return; 1654 } 1655 1656 unsigned Slot = Tracker.getSlot(this); 1657 if (Slot == unsigned(-1)) 1658 OS << "<badref>"; 1659 else 1660 OS << "vp<%" << Tracker.getSlot(this) << ">"; 1661 } 1662 1663 void VPUser::printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const { 1664 interleaveComma(operands(), O, [&O, &SlotTracker](VPValue *Op) { 1665 Op->printAsOperand(O, SlotTracker); 1666 }); 1667 } 1668 #endif 1669 1670 void VPInterleavedAccessInfo::visitRegion(VPRegionBlock *Region, 1671 Old2NewTy &Old2New, 1672 InterleavedAccessInfo &IAI) { 1673 ReversePostOrderTraversal<VPBlockBase *> RPOT(Region->getEntry()); 1674 for (VPBlockBase *Base : RPOT) { 1675 visitBlock(Base, Old2New, IAI); 1676 } 1677 } 1678 1679 void VPInterleavedAccessInfo::visitBlock(VPBlockBase *Block, Old2NewTy &Old2New, 1680 InterleavedAccessInfo &IAI) { 1681 if (VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(Block)) { 1682 for (VPRecipeBase &VPI : *VPBB) { 1683 if (isa<VPHeaderPHIRecipe>(&VPI)) 1684 continue; 1685 assert(isa<VPInstruction>(&VPI) && "Can only handle VPInstructions"); 1686 auto *VPInst = cast<VPInstruction>(&VPI); 1687 auto *Inst = cast<Instruction>(VPInst->getUnderlyingValue()); 1688 auto *IG = IAI.getInterleaveGroup(Inst); 1689 if (!IG) 1690 continue; 1691 1692 auto NewIGIter = Old2New.find(IG); 1693 if (NewIGIter == Old2New.end()) 1694 Old2New[IG] = new InterleaveGroup<VPInstruction>( 1695 IG->getFactor(), IG->isReverse(), IG->getAlign()); 1696 1697 if (Inst == IG->getInsertPos()) 1698 Old2New[IG]->setInsertPos(VPInst); 1699 1700 InterleaveGroupMap[VPInst] = Old2New[IG]; 1701 InterleaveGroupMap[VPInst]->insertMember( 1702 VPInst, IG->getIndex(Inst), 1703 
Align(IG->isReverse() ? (-1) * int(IG->getFactor()) 1704 : IG->getFactor())); 1705 } 1706 } else if (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block)) 1707 visitRegion(Region, Old2New, IAI); 1708 else 1709 llvm_unreachable("Unsupported kind of VPBlock."); 1710 } 1711 1712 VPInterleavedAccessInfo::VPInterleavedAccessInfo(VPlan &Plan, 1713 InterleavedAccessInfo &IAI) { 1714 Old2NewTy Old2New; 1715 visitRegion(Plan.getVectorLoopRegion(), Old2New, IAI); 1716 } 1717 1718 void VPSlotTracker::assignSlot(const VPValue *V) { 1719 assert(Slots.find(V) == Slots.end() && "VPValue already has a slot!"); 1720 Slots[V] = NextSlot++; 1721 } 1722 1723 void VPSlotTracker::assignSlots(const VPlan &Plan) { 1724 1725 for (const VPValue *V : Plan.VPExternalDefs) 1726 assignSlot(V); 1727 1728 assignSlot(&Plan.VectorTripCount); 1729 if (Plan.BackedgeTakenCount) 1730 assignSlot(Plan.BackedgeTakenCount); 1731 1732 ReversePostOrderTraversal< 1733 VPBlockRecursiveTraversalWrapper<const VPBlockBase *>> 1734 RPOT(VPBlockRecursiveTraversalWrapper<const VPBlockBase *>( 1735 Plan.getEntry())); 1736 for (const VPBasicBlock *VPBB : 1737 VPBlockUtils::blocksOnly<const VPBasicBlock>(RPOT)) 1738 for (const VPRecipeBase &Recipe : *VPBB) 1739 for (VPValue *Def : Recipe.definedValues()) 1740 assignSlot(Def); 1741 } 1742 1743 bool vputils::onlyFirstLaneUsed(VPValue *Def) { 1744 return all_of(Def->users(), [Def](VPUser *U) { 1745 return cast<VPRecipeBase>(U)->onlyFirstLaneUsed(Def); 1746 }); 1747 } 1748