//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
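//
// As a conceptual sketch of the transformation described above (assuming a
// vectorization factor of 4 and a trip count divisible by 4), a scalar loop
// such as
//
//   for (i = 0; i < n; ++i)
//     C[i] = A[i] + B[i];
//
// is executed as 'wide' iterations, each computing four elements at once:
//
//   for (i = 0; i < n; i += 4)
//     C[i:i+3] = A[i:i+3] + B[i:i+3];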
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <map>
#include <tuple>

using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// We don't vectorize loops with a known constant trip count below this
/// number.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Don't vectorize loops with a constant "
             "trip count that is smaller than this "
             "value."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));
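
// Note: the cl::opt flags in this file are registered with LLVM's
// command-line parser, so they can be toggled when the pass is run through
// 'opt'; for example (a sketch, with a placeholder input file):
//   opt -loop-vectorize -force-target-num-vector-regs=16 input.ll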

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(false), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

namespace {

// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

// A traits type that is intended to be used in graph algorithms. The graph it
// models starts at the loop header, and traverses the BasicBlocks that are in
// the loop body, but not the loop header. Since the loop header is skipped,
// the back edges are excluded.
struct LoopBodyTraits {
  using NodeRef = std::pair<const Loop *, BasicBlock *>;
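
  // For example (an illustrative sketch), a loop with header H and body
  // blocks B1 and B2, where B2 branches back to H, is modeled as the graph
  // H -> B1 -> B2, with the back edge B2 -> H filtered out.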

  // This wraps a const Loop * into the iterator, so we know which edges to
  // filter out.
  class WrappedSuccIterator
      : public iterator_adaptor_base<
            WrappedSuccIterator, succ_iterator,
            typename std::iterator_traits<succ_iterator>::iterator_category,
            NodeRef, std::ptrdiff_t, NodeRef *, NodeRef> {
    using BaseT = iterator_adaptor_base<
        WrappedSuccIterator, succ_iterator,
        typename std::iterator_traits<succ_iterator>::iterator_category,
        NodeRef, std::ptrdiff_t, NodeRef *, NodeRef>;

    const Loop *L;

  public:
    WrappedSuccIterator(succ_iterator Begin, const Loop *L)
        : BaseT(Begin), L(L) {}

    NodeRef operator*() const { return {L, *I}; }
  };

  struct LoopBodyFilter {
    bool operator()(NodeRef N) const {
      const Loop *L = N.first;
      return N.second != L->getHeader() && L->contains(N.second);
    }
  };

  using ChildIteratorType =
      filter_iterator<WrappedSuccIterator, LoopBodyFilter>;

  static NodeRef getEntryNode(const Loop &G) { return {&G, G.getHeader()}; }

  static ChildIteratorType child_begin(NodeRef Node) {
    return make_filter_range(make_range<WrappedSuccIterator>(
                                 {succ_begin(Node.second), Node.first},
                                 {succ_end(Node.second), Node.first}),
                             LoopBodyFilter{})
        .begin();
  }

  static ChildIteratorType child_end(NodeRef Node) {
    return make_filter_range(make_range<WrappedSuccIterator>(
                                 {succ_begin(Node.second), Node.first},
                                 {succ_end(Node.second), Node.first}),
                             LoopBodyFilter{})
        .end();
  }
};

/// Returns true if the given loop body has a cycle, excluding the loop
/// itself.
static bool hasCyclesInLoopBody(const Loop &L) {
  if (!L.empty())
    return true;

  for (const auto SCC :
       make_range(scc_iterator<Loop, LoopBodyTraits>::begin(L),
                  scc_iterator<Loop, LoopBodyTraits>::end(L))) {
    if (SCC.size() > 1) {
      DEBUG(dbgs() << "LVL: Detected a cycle in the loop body:\n");
      DEBUG(L.dump());
      return true;
    }
  }
  return false;
}

/// \brief This modifies LoopAccessReport to initialize message with
/// loop-vectorizer-specific part.
class VectorizationReport : public LoopAccessReport {
public:
  VectorizationReport(Instruction *I = nullptr)
      : LoopAccessReport("loop not vectorized: ", I) {}

  /// \brief This allows promotion of the loop-access analysis report into the
  /// loop-vectorizer report. It modifies the message to add the
  /// loop-vectorizer-specific part of the message.
  explicit VectorizationReport(const LoopAccessReport &R)
      : LoopAccessReport(Twine("loop not vectorized: ") + R.str(),
                         R.getInstr()) {}
};

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}
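
// For example, ToVectorTy(i32, 4) yields <4 x i32>, while ToVectorTy(i32, 1)
// and ToVectorTy(void, 4) return the scalar i32 and void types unchanged.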

/// A helper function that returns a GEP instruction and knows to skip a
/// 'bitcast'. The 'bitcast' may be skipped if the source and the destination
/// pointee types of the 'bitcast' have the same size.
/// For example:
///   bitcast double** %var to i64* - can be skipped
///   bitcast double** %var to i8*  - cannot
static GetElementPtrInst *getGEPInstruction(Value *Ptr) {
  if (isa<GetElementPtrInst>(Ptr))
    return cast<GetElementPtrInst>(Ptr);

  if (isa<BitCastInst>(Ptr) &&
      isa<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0))) {
    Type *BitcastTy = Ptr->getType();
    Type *GEPTy = cast<BitCastInst>(Ptr)->getSrcTy();
    if (!isa<PointerType>(BitcastTy) || !isa<PointerType>(GEPTy))
      return nullptr;
    Type *Pointee1Ty = cast<PointerType>(BitcastTy)->getPointerElementType();
    Type *Pointee2Ty = cast<PointerType>(GEPTy)->getPointerElementType();
    const DataLayout &DL = cast<BitCastInst>(Ptr)->getModule()->getDataLayout();
    if (DL.getTypeSizeInBits(Pointee1Ty) == DL.getTypeSizeInBits(Pointee2Ty))
      return cast<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0));
  }
  return nullptr;
}

/// A helper function that returns the pointer operand of a load or store
/// instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found, for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Induction(nullptr),
        OldInduction(nullptr), WidenMap(UnrollFactor), TripCount(nullptr),
        VectorTripCount(nullptr), Legal(nullptr), AddedSafetyChecks(false) {}
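
  // A typical use by the driver (an illustrative sketch, not a fixed API
  // contract): construct the vectorizer with the chosen factors and then
  // invoke vectorize() with the legality analysis results, e.g.
  //   InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF, UF);
  //   LB.vectorize(&LVL, MinBWs);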

  // Perform the actual loop widening (vectorization).
  // MinimumBitWidths maps scalar integer values to the smallest bitwidth they
  // can be validly truncated to. The cost model has assumed this truncation
  // will happen when vectorizing.
  void vectorize(LoopVectorizationLegality *L,
                 const MapVector<Instruction *, uint64_t> &MinimumBitWidths) {
    MinBWs = &MinimumBitWidths;
    Legal = L;
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop();
    // Widen each instruction in the old loop to a new one in the new loop.
    // Use the Legality module to find the induction and reduction variables.
    vectorizeLoop();
  }

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;
  /// When we unroll loops we have multiple vector values for each scalar.
  /// This data structure holds the unrolled and vectorized values that
  /// originated from one scalar instruction.
  typedef SmallVector<Value *, 2> VectorParts;

  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCache;

  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop();

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);
  /// Copy and widen the instructions from the old loop.
  virtual void vectorizeLoop();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes where the
  /// incoming value is 'Undef'. While vectorizing we only handled real values
  /// that were defined inside the loop. Here we fix the 'undef case'.
  /// See PR14725.
  void fixLCSSAPHIs();

  /// Predicate conditional stores on their respective conditions.
  void predicateStores();

  /// Shrinks vector element sizes based on information in "MinBWs".
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);
  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// A helper function to vectorize a single BB within the innermost loop.
  void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VectorParts &Entry, unsigned UF,
                           unsigned VF, PhiVector *PV);

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateStore is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
  virtual void scalarizeInstruction(Instruction *Instr,
                                    bool IfPredicateStore = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// This is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
  /// can be a truncate instruction).
  void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal);

  /// Create a vector induction phi node based on an existing scalar one. This
  /// currently only works for integer induction variables with a constant
  /// step. If \p TruncType is non-null, instead of widening the original IV,
  /// we widen a version of the IV truncated to \p TruncType.
  void createVectorIntInductionPHI(const InductionDescriptor &II,
                                   VectorParts &Entry, IntegerType *TruncType);

  /// Widen an integer induction variable \p IV. If \p Trunc is provided, the
  /// induction variable will first be truncated to the corresponding type. The
  /// widened values are placed in \p Entry.
  void widenIntInduction(PHINode *IV, VectorParts &Entry,
                         TruncInst *Trunc = nullptr);

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// When we go over instructions in the basic block we rely on previous
  /// values within the current basic block or on loop invariant values.
  /// When we widen (vectorize) values we place them in the map. If the values
  /// are not within the map, they have to be loop invariant, so we simply
  /// broadcast them into a vector.
  VectorParts &getVectorValue(Value *V);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the trip count would overflow, or we
  /// wouldn't have enough iterations to execute one vector loop.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
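  // In other words, the generated check branches to the scalar loop when the
  // remaining trip count is less than VF * UF (or when computing it would
  // overflow), since in that case not even one widened iteration can execute.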
  /// Emit a bypass check to see if the vector trip count is nonzero.
  void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// This is a helper class that holds the vectorizer state. It maps scalar
  /// instructions to vector instructions. When the code is 'unrolled' then
  /// a single scalar value is mapped to multiple vector parts. The parts
  /// are stored in the VectorParts type.
  struct ValueMap {
    /// C'tor. UnrollFactor controls the number of vectors ('parts') that
    /// are mapped.
    ValueMap(unsigned UnrollFactor) : UF(UnrollFactor) {}

    /// \return True if 'Key' is saved in the Value Map.
    bool has(Value *Key) const { return MapStorage.count(Key); }

    /// Initializes a new entry in the map. Sets all of the vector parts to the
    /// same value in 'Val'.
    /// \return A reference to a vector with splat values.
    VectorParts &splat(Value *Key, Value *Val) {
      VectorParts &Entry = MapStorage[Key];
      Entry.assign(UF, Val);
      return Entry;
    }

    ///\return A reference to the value that is stored at 'Key'.
    VectorParts &get(Value *Key) {
      VectorParts &Entry = MapStorage[Key];
      if (Entry.empty())
        Entry.resize(UF);
      assert(Entry.size() == UF);
      return Entry;
    }

  private:
    /// The unroll factor. Each entry in the map stores this number of vector
    /// elements.
    unsigned UF;

    /// Map storage. We use std::map and not DenseMap because insertions into a
    /// dense map invalidate its iterators.
    std::map<Value *, VectorParts> MapStorage;
  };

  /// The original loop.
  Loop *OrigLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;
  /// Loop Info.
  LoopInfo *LI;
  /// Dominator Tree.
  DominatorTree *DT;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Assumption Cache.
  AssumptionCache *AC;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

protected:
  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;
  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;
  /// Middle Block between the vector and the scalar loop.
  BasicBlock *LoopMiddleBlock;
  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;
  /// The vector loop body.
  BasicBlock *LoopVectorBody;
  /// The scalar loop body.
  BasicBlock *LoopScalarBody;
  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;
  /// The induction variable of the old basic block.
  PHINode *OldInduction;
  /// Maps scalars to widened vectors.
  ValueMap WidenMap;

  /// A map of induction variables from the original loop to their
  /// corresponding VF * UF scalarized values in the vectorized loop. The
  /// purpose of ScalarIVMap is similar to that of WidenMap. Whereas WidenMap
  /// maps original loop values to their vector versions in the new loop,
  /// ScalarIVMap maps induction variables from the original loop that are not
  /// vectorized to their scalar equivalents in the vector loop. Maintaining a
  /// separate map for scalarized induction variables allows us to avoid
  /// unnecessary scalar-to-vector-to-scalar conversions.
  DenseMap<Value *, SmallVector<Value *, 8>> ScalarIVMap;

  /// Store instructions that should be predicated, as a pair
  /// <StoreInst, Predicate>.
  SmallVector<std::pair<StoreInst *, Value *>, 4> PredicatedStores;
  EdgeMaskCache MaskCache;
  /// Trip count of the original loop.
  Value *TripCount;
  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount;
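  // For example, with TripCount = 1003, VF = 4, and UF = 2, VectorTripCount
  // is 1003 - (1003 % 8) = 1000; the remaining 3 iterations run in the
  // scalar epilogue loop.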

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  const MapVector<Instruction *, uint64_t> *MinBWs;

  LoopVectorizationLegality *Legal;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor) {}

private:
  void scalarizeInstruction(Instruction *Instr,
                            bool IfPredicateStore = false) override;
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

/// \brief Set the debug location in the builder using the debug location in
/// the instruction.
static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr))
    B.SetCurrentDebugLocation(Inst->getDebugLoc());
  else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///   for (unsigned i = 0; i < 1024; i+=4) {
///     a = A[i];                           // Member of index 0
///     b = A[i+1];                         // Member of index 1
///     d = A[i+3];                         // Member of index 3
///     ...
///   }
///
/// An interleaved store group of factor 4:
///   for (unsigned i = 0; i < 1024; i+=4) {
///     ...
///     A[i]   = a;                         // Member of index 0
///     A[i+1] = b;                         // Member of index 1
///     A[i+2] = c;                         // Member of index 2
///     A[i+3] = d;                         // Member of index 3
///   }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is related to the leader and it could be
  /// negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The largest index is always less than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if the group contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g.
  //        %even = load i32             // Insert Position
  //        %add = add i32 %even         // Use of %even
  //        %odd = load i32
  //
  //        store i32 %even
  //        %odd = add i32               // Def of %odd
  //        store i32 %odd               // Insert Position
  Instruction *InsertPos;
};

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise it's meaningless to do analysis as the vectorization
/// on interleaved accesses is unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(nullptr),
        RequiresScalarEpilogue(false) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Return the maximum interleave factor of all interleaved groups.
  unsigned getMaxInterleaveFactor() const {
    unsigned MaxFactor = 1;
    for (auto &Entry : InterleaveGroupMap)
      MaxFactor = std::max(MaxFactor, Entry.second->getFactor());
    return MaxFactor;
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr doesn't belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

  /// \brief Initialize the LoopAccessInfo used for dependence checking.
  void setLAI(const LoopAccessInfo *Info) { LAI = Info; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;
  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// Holds dependences among the memory accesses in the loop. It maps a source
  /// access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    StrideDescriptor() = default;

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;
    const SCEV *Scev = nullptr; // The scalar expression of this access.
    uint64_t Size = 0;          // The size of the memory object.
    unsigned Align = 0;         // The alignment of this access.
  };

  /// \brief A type for holding instructions and their stride descriptors.
  typedef std::pair<Instruction *, StrideDescriptor> StrideEntry;

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// \brief Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride) {
    unsigned Factor = std::abs(Stride);
    return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
  }

  /// \brief Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }

  /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return true if reordering is
  /// unnecessary or is known to be safe, and false if \p A and \p B may be
  /// dependent.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {
    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
  }

  /// \brief Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for them.
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;
  /// Vectorization interleave factor.
  Hint Interleave;
  /// Vectorization forced.
  Hint Force;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
                     OptimizationRemarkEmitter &ORE)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        PotentiallyUnsafe(false), TheLoop(L), ORE(ORE) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized() {
    Width.Value = Interleave.Value = 1;
    Hint Hints[] = {Width, Interleave};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      ORE.emitOptimizationRemarkAnalysis(vectorizeAnalysisPassName(), L,
                                         emitRemark());
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      ORE.emitOptimizationRemarkAnalysis(vectorizeAnalysisPassName(), L,
                                         emitRemark());
      return false;
    }

    if (getWidth() == 1 && getInterleave() == 1) {
      // FIXME: Add a separate metadata to indicate when the loop has already
      // been vectorized instead of setting width and count to 1.
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      ORE.emitOptimizationRemarkAnalysis(
          vectorizeAnalysisPassName(), L,
          "loop not vectorized: vectorization and interleaving are explicitly "
          "disabled, or vectorize width and interleave count are both set to "
          "1");
      return false;
    }

    return true;
  }

  /// Dumps all the hint information.
  std::string emitRemark() const {
    VectorizationReport R;
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      R << "vectorization is explicitly disabled";
    else {
      R << "use -Rpass-analysis=loop-vectorize for more info";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=true";
        if (Width.Value != 0)
          R << ", Vector Width=" << Width.Value;
        if (Interleave.Value != 0)
          R << ", Interleave Count=" << Interleave.Value;
        R << ")";
      }
    }

    return R.str();
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }

  /// \brief If hints are provided that force vectorization, use the AlwaysPrint
  /// pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const {
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return DiagnosticInfoOptimizationRemarkAnalysis::AlwaysPrint;
  }

  bool allowReordering() const {
    // When hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations given by the scalar loop.
    // This is not enabled by default because it can be unsafe or inefficient.
    // For example, reordering floating-point operations will change the way
    // round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either a MDString or a MDNode with the first
      // operand a MDString.
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (!MD || MD->getNumOperands() == 0)
          continue;
        S = dyn_cast<MDString>(MD->getOperand(0));
        for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
          Args.push_back(MD->getOperand(i));
      } else {
        S = dyn_cast<MDString>(LoopID->getOperand(i));
        assert(Args.size() == 0 && "too many arguments for MDString");
      }

      if (!S)
        continue;

      // Check if the hint starts with the loop metadata prefix.
      StringRef Name = S->getString();
      if (Args.size() == 1)
        setHint(Name, Args[0]);
    }
  }

  /// Checks a string hint with one operand and sets the value if valid.
  void setHint(StringRef Name, Metadata *Arg) {
    if (!Name.startswith(Prefix()))
      return;
    Name = Name.substr(Prefix().size(), StringRef::npos);

    const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
    if (!C)
      return;
    unsigned Val = C->getZExtValue();

    Hint *Hints[] = {&Width, &Interleave, &Force};
    for (auto H : Hints) {
      if (Name == H->Name) {
        if (H->validate(Val))
          H->Value = Val;
        else
          DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
        break;
      }
    }
  }
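
  // For reference, the hints above are encoded in loop metadata along these
  // lines (a sketch of the IR; the node numbers are illustrative):
  //   br i1 %cond, label %loop, label %exit, !llvm.loop !0
  //   !0 = distinct !{!0, !1, !2}
  //   !1 = !{!"llvm.loop.vectorize.width", i32 4}
  //   !2 = !{!"llvm.loop.interleave.count", i32 2}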

  /// Create a new hint from name / value pair.
  MDNode *createHintMetadata(StringRef Name, unsigned V) const {
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    Metadata *MDs[] = {MDString::get(Context, Name),
                       ConstantAsMetadata::get(
                           ConstantInt::get(Type::getInt32Ty(Context), V))};
    return MDNode::get(Context, MDs);
  }

  /// Matches metadata with hint name.
  bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
    MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
    if (!Name)
      return false;

    for (auto H : HintTypes)
      if (Name->getString().endswith(H.Name))
        return true;
    return false;
  }

  /// Sets current hints into loop metadata, keeping other values intact.
  void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
    if (HintTypes.size() == 0)
      return;

    // Reserve the first element to LoopID (see below).
    SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, then ignore the existing operands.
    MDNode *LoopID = TheLoop->getLoopID();
    if (LoopID) {
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If node in update list, ignore old value.
        if (!matchesHintMetadataName(Node, HintTypes))
          MDs.push_back(Node);
      }
    }

    // Now, add the missing hints.
    for (auto H : HintTypes)
      MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));

    // Replace current metadata node with new one.
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);

    TheLoop->setLoopID(NewLoopID);
  }

  /// The loop these hints belong to.
  const Loop *TheLoop;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter &ORE;
};

static void emitAnalysisDiag(const Loop *TheLoop,
                             const LoopVectorizeHints &Hints,
                             OptimizationRemarkEmitter &ORE,
                             const LoopAccessReport &Message) {
  const char *Name = Hints.vectorizeAnalysisPassName();
  LoopAccessReport::emitAnalysis(Message, TheLoop, Name, ORE);
}

static void emitMissedWarning(Function *F, Loop *L,
                              const LoopVectorizeHints &LH,
                              OptimizationRemarkEmitter *ORE) {
  ORE->emitOptimizationRemarkMissed(LV_NAME, L, LH.emitRemark());

  if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
    if (LH.getWidth() != 1)
      emitLoopVectorizeWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop vectorization");
    else if (LH.getInterleave() != 1)
      emitLoopInterleaveWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop interleaving");
  }
}

/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
/// to what vectorization factor.
/// This class does not look at the profitability of vectorization, only the
/// legality. This class has two main kinds of checks:
/// * Memory checks - The code in canVectorizeMemory checks if vectorization
///   will change the order of memory accesses in a way that will change the
///   correctness of the program.
1436 /// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory 1437 /// checks for a number of different conditions, such as the availability of a 1438 /// single induction variable, that all types are supported and vectorize-able, 1439 /// etc. This code reflects the capabilities of InnerLoopVectorizer. 1440 /// This class is also used by InnerLoopVectorizer for identifying 1441 /// induction variable and the different reduction variables. 1442 class LoopVectorizationLegality { 1443 public: 1444 LoopVectorizationLegality( 1445 Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT, 1446 TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F, 1447 const TargetTransformInfo *TTI, 1448 std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI, 1449 OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R, 1450 LoopVectorizeHints *H) 1451 : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT), 1452 GetLAA(GetLAA), LAI(nullptr), ORE(ORE), InterleaveInfo(PSE, L, DT, LI), 1453 Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false), 1454 Requirements(R), Hints(H) {} 1455 1456 /// ReductionList contains the reduction descriptors for all 1457 /// of the reductions that were found in the loop. 1458 typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList; 1459 1460 /// InductionList saves induction variables and maps them to the 1461 /// induction descriptor. 1462 typedef MapVector<PHINode *, InductionDescriptor> InductionList; 1463 1464 /// RecurrenceSet contains the phi nodes that are recurrences other than 1465 /// inductions and reductions. 1466 typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet; 1467 1468 /// Returns true if it is legal to vectorize this loop. 1469 /// This does not mean that it is profitable to vectorize this 1470 /// loop, only that it is legal to do so. 1471 bool canVectorize(); 1472 1473 /// Returns the Induction variable. 1474 PHINode *getInduction() { return Induction; } 1475 1476 /// Returns the reduction variables found in the loop. 1477 ReductionList *getReductionVars() { return &Reductions; } 1478 1479 /// Returns the induction variables found in the loop. 1480 InductionList *getInductionVars() { return &Inductions; } 1481 1482 /// Return the first-order recurrences found in the loop. 1483 RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; } 1484 1485 /// Returns the widest induction type. 1486 Type *getWidestInductionType() { return WidestIndTy; } 1487 1488 /// Returns True if V is an induction variable in this loop. 1489 bool isInductionVariable(const Value *V); 1490 1491 /// Returns True if PN is a reduction variable in this loop. 1492 bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); } 1493 1494 /// Returns True if Phi is a first-order recurrence in this loop. 1495 bool isFirstOrderRecurrence(const PHINode *Phi); 1496 1497 /// Return true if the block BB needs to be predicated in order for the loop 1498 /// to be vectorized. 1499 bool blockNeedsPredication(BasicBlock *BB); 1500 1501 /// Check if this pointer is consecutive when vectorizing. This happens 1502 /// when the last index of the GEP is the induction variable, or that the 1503 /// pointer itself is an induction variable. 1504 /// This check allows us to vectorize A[idx] into a wide load/store. 1505 /// Returns: 1506 /// 0 - Stride is unknown or non-consecutive. 1507 /// 1 - Address is consecutive. 1508 /// -1 - Address is consecutive, and decreasing. 
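/// For example, assuming i is the loop induction variable: A[i] returns 1,
/// A[N - i] returns -1, and A[2 * i] or A[B[i]] return 0 (an illustrative
/// sketch, not an exhaustive specification).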
1509 int isConsecutivePtr(Value *Ptr); 1510 1511 /// Returns true if the value V is uniform within the loop. 1512 bool isUniform(Value *V); 1513 1514 /// Returns true if \p I is known to be uniform after vectorization. 1515 bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); } 1516 1517 /// Returns true if \p I is known to be scalar after vectorization. 1518 bool isScalarAfterVectorization(Instruction *I) { return Scalars.count(I); } 1519 1520 /// Returns the information that we collected about runtime memory check. 1521 const RuntimePointerChecking *getRuntimePointerChecking() const { 1522 return LAI->getRuntimePointerChecking(); 1523 } 1524 1525 const LoopAccessInfo *getLAI() const { return LAI; } 1526 1527 /// \brief Check if \p Instr belongs to any interleaved access group. 1528 bool isAccessInterleaved(Instruction *Instr) { 1529 return InterleaveInfo.isInterleaved(Instr); 1530 } 1531 1532 /// \brief Return the maximum interleave factor of all interleaved groups. 1533 unsigned getMaxInterleaveFactor() const { 1534 return InterleaveInfo.getMaxInterleaveFactor(); 1535 } 1536 1537 /// \brief Get the interleaved access group that \p Instr belongs to. 1538 const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) { 1539 return InterleaveInfo.getInterleaveGroup(Instr); 1540 } 1541 1542 /// \brief Returns true if an interleaved group requires a scalar iteration 1543 /// to handle accesses with gaps. 1544 bool requiresScalarEpilogue() const { 1545 return InterleaveInfo.requiresScalarEpilogue(); 1546 } 1547 1548 unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); } 1549 1550 bool hasStride(Value *V) { return LAI->hasStride(V); } 1551 1552 /// Returns true if the target machine supports masked store operation 1553 /// for the given \p DataType and kind of access to \p Ptr. 1554 bool isLegalMaskedStore(Type *DataType, Value *Ptr) { 1555 return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType); 1556 } 1557 /// Returns true if the target machine supports masked load operation 1558 /// for the given \p DataType and kind of access to \p Ptr. 1559 bool isLegalMaskedLoad(Type *DataType, Value *Ptr) { 1560 return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType); 1561 } 1562 /// Returns true if the target machine supports masked scatter operation 1563 /// for the given \p DataType. 1564 bool isLegalMaskedScatter(Type *DataType) { 1565 return TTI->isLegalMaskedScatter(DataType); 1566 } 1567 /// Returns true if the target machine supports masked gather operation 1568 /// for the given \p DataType. 1569 bool isLegalMaskedGather(Type *DataType) { 1570 return TTI->isLegalMaskedGather(DataType); 1571 } 1572 /// Returns true if the target machine can represent \p V as a masked gather 1573 /// or scatter operation. 1574 bool isLegalGatherOrScatter(Value *V) { 1575 auto *LI = dyn_cast<LoadInst>(V); 1576 auto *SI = dyn_cast<StoreInst>(V); 1577 if (!LI && !SI) 1578 return false; 1579 auto *Ptr = getPointerOperand(V); 1580 auto *Ty = cast<PointerType>(Ptr->getType())->getElementType(); 1581 return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty)); 1582 } 1583 1584 /// Returns true if vector representation of the instruction \p I 1585 /// requires mask. 
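/// For instance, a store that executes only under a loop-variant condition
/// must be masked after if-conversion so that inactive vector lanes do not
/// write to memory (illustrative example).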
1586 bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); } 1587 unsigned getNumStores() const { return LAI->getNumStores(); } 1588 unsigned getNumLoads() const { return LAI->getNumLoads(); } 1589 unsigned getNumPredStores() const { return NumPredStores; } 1590 1591 private: 1592 /// Check if a single basic block loop is vectorizable. 1593 /// At this point we know that this is a loop with a constant trip count 1594 /// and we only need to check individual instructions. 1595 bool canVectorizeInstrs(); 1596 1597 /// When we vectorize loops we may change the order in which 1598 /// we read and write from memory. This method checks if it is 1599 /// legal to vectorize the code, considering only memory constraints. 1600 /// Returns true if the loop is vectorizable. 1601 bool canVectorizeMemory(); 1602 1603 /// Return true if we can vectorize this loop using the IF-conversion 1604 /// transformation. 1605 bool canVectorizeWithIfConvert(); 1606 1607 /// Collect the instructions that are uniform after vectorization. An 1608 /// instruction is uniform if we represent it with a single scalar value in 1609 /// the vectorized loop corresponding to each vector iteration. Examples of 1610 /// uniform instructions include pointer operands of consecutive or 1611 /// interleaved memory accesses. Note that although uniformity implies an 1612 /// instruction will be scalar, the reverse is not true. In general, a 1613 /// scalarized instruction will be represented by VF scalar values in the 1614 /// vectorized loop, each corresponding to an iteration of the original 1615 /// scalar loop. 1616 void collectLoopUniforms(); 1617 1618 /// Collect the instructions that are scalar after vectorization. An 1619 /// instruction is scalar if it is known to be uniform or will be scalarized 1620 /// during vectorization. Non-uniform scalarized instructions will be 1621 /// represented by VF values in the vectorized loop, each corresponding to an 1622 /// iteration of the original scalar loop. 1623 void collectLoopScalars(); 1624 1625 /// Return true if all of the instructions in the block can be speculatively 1626 /// executed. \p SafePtrs is a list of addresses that are known to be legal 1627 /// and we know that we can read from them without segfault. 1628 bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs); 1629 1630 /// Updates the vectorization state by adding \p Phi to the inductions list. 1631 /// This can set \p Phi as the main induction of the loop if \p Phi is a 1632 /// better choice for the main induction than the existing one. 1633 void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID, 1634 SmallPtrSetImpl<Value *> &AllowedExit); 1635 1636 /// Report an analysis message to assist the user in diagnosing loops that are 1637 /// not vectorized. These are handled as LoopAccessReport rather than 1638 /// VectorizationReport because the << operator of VectorizationReport returns 1639 /// LoopAccessReport. 1640 void emitAnalysis(const LoopAccessReport &Message) const { 1641 emitAnalysisDiag(TheLoop, *Hints, *ORE, Message); 1642 } 1643 1644 /// \brief If an access has a symbolic stride, this maps the pointer value to 1645 /// the stride symbol. 1646 const ValueToValueMap *getSymbolicStrides() { 1647 // FIXME: Currently, the set of symbolic strides is sometimes queried before 1648 // it's collected. This happens from canVectorizeWithIfConvert, when the 1649 // pointer is checked to reference consecutive elements suitable for a 1650 // masked access.
1651 return LAI ? &LAI->getSymbolicStrides() : nullptr; 1652 } 1653 1654 unsigned NumPredStores; 1655 1656 /// The loop that we evaluate. 1657 Loop *TheLoop; 1658 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. 1659 /// Applies dynamic knowledge to simplify SCEV expressions in the context 1660 /// of existing SCEV assumptions. The analysis will also add a minimal set 1661 /// of new predicates if this is required to enable vectorization and 1662 /// unrolling. 1663 PredicatedScalarEvolution &PSE; 1664 /// Target Library Info. 1665 TargetLibraryInfo *TLI; 1666 /// Target Transform Info 1667 const TargetTransformInfo *TTI; 1668 /// Dominator Tree. 1669 DominatorTree *DT; 1670 // LoopAccess analysis. 1671 std::function<const LoopAccessInfo &(Loop &)> *GetLAA; 1672 // And the loop-accesses info corresponding to this loop. This pointer is 1673 // null until canVectorizeMemory sets it up. 1674 const LoopAccessInfo *LAI; 1675 /// Interface to emit optimization remarks. 1676 OptimizationRemarkEmitter *ORE; 1677 1678 /// The interleave access information contains groups of interleaved accesses 1679 /// with the same stride and close to each other. 1680 InterleavedAccessInfo InterleaveInfo; 1681 1682 // --- vectorization state --- // 1683 1684 /// Holds the integer induction variable. This is the counter of the 1685 /// loop. 1686 PHINode *Induction; 1687 /// Holds the reduction variables. 1688 ReductionList Reductions; 1689 /// Holds all of the induction variables that we found in the loop. 1690 /// Notice that inductions don't need to start at zero and that induction 1691 /// variables can be pointers. 1692 InductionList Inductions; 1693 /// Holds the phi nodes that are first-order recurrences. 1694 RecurrenceSet FirstOrderRecurrences; 1695 /// Holds the widest induction type encountered. 1696 Type *WidestIndTy; 1697 1698 /// Allowed outside users. This holds the induction and reduction 1699 /// vars which can be accessed from outside the loop. 1700 SmallPtrSet<Value *, 4> AllowedExit; 1701 1702 /// Holds the instructions known to be uniform after vectorization. 1703 SmallPtrSet<Instruction *, 4> Uniforms; 1704 1705 /// Holds the instructions known to be scalar after vectorization. 1706 SmallPtrSet<Instruction *, 4> Scalars; 1707 1708 /// Can we assume the absence of NaNs. 1709 bool HasFunNoNaNAttr; 1710 1711 /// Vectorization requirements that will go through late-evaluation. 1712 LoopVectorizationRequirements *Requirements; 1713 1714 /// Used to emit an analysis of any legality issues. 1715 LoopVectorizeHints *Hints; 1716 1717 /// While vectorizing these instructions we have to generate a 1718 /// call to the appropriate masked intrinsic 1719 SmallPtrSet<const Instruction *, 8> MaskedOp; 1720 }; 1721 1722 /// LoopVectorizationCostModel - estimates the expected speedups due to 1723 /// vectorization. 1724 /// In many cases vectorization is not profitable. This can happen because of 1725 /// a number of reasons. In this class we mainly attempt to predict the 1726 /// expected speedup/slowdowns due to the supported instruction set. We use the 1727 /// TargetTransformInfo to query the different backends for the cost of 1728 /// different operations. 
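/// As a purely illustrative example with made-up cost numbers: if a scalar
/// iteration costs 8 units and the VF=4 vector body costs 20 units, the
/// vector cost per scalar iteration is 20/4 = 5, so VF=4 would be considered
/// profitable relative to the scalar loop.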
1729 class LoopVectorizationCostModel { 1730 public: 1731 LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE, 1732 LoopInfo *LI, LoopVectorizationLegality *Legal, 1733 const TargetTransformInfo &TTI, 1734 const TargetLibraryInfo *TLI, DemandedBits *DB, 1735 AssumptionCache *AC, 1736 OptimizationRemarkEmitter *ORE, const Function *F, 1737 const LoopVectorizeHints *Hints) 1738 : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB), 1739 AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {} 1740 1741 /// Information about vectorization costs 1742 struct VectorizationFactor { 1743 unsigned Width; // Vector width with best cost 1744 unsigned Cost; // Cost of the loop with that width 1745 }; 1746 /// \return The most profitable vectorization factor and the cost of that VF. 1747 /// This method checks every power of two up to VF. If UserVF is not ZERO 1748 /// then this vectorization factor will be selected if vectorization is 1749 /// possible. 1750 VectorizationFactor selectVectorizationFactor(bool OptForSize); 1751 1752 /// \return The size (in bits) of the smallest and widest types in the code 1753 /// that needs to be vectorized. We ignore values that remain scalar such as 1754 /// 64 bit loop indices. 1755 std::pair<unsigned, unsigned> getSmallestAndWidestTypes(); 1756 1757 /// \return The desired interleave count. 1758 /// If interleave count has been specified by metadata it will be returned. 1759 /// Otherwise, the interleave count is computed and returned. VF and LoopCost 1760 /// are the selected vectorization factor and the cost of the selected VF. 1761 unsigned selectInterleaveCount(bool OptForSize, unsigned VF, 1762 unsigned LoopCost); 1763 1764 /// \return The most profitable unroll factor. 1765 /// This method finds the best unroll-factor based on register pressure and 1766 /// other parameters. VF and LoopCost are the selected vectorization factor 1767 /// and the cost of the selected VF. 1768 unsigned computeInterleaveCount(bool OptForSize, unsigned VF, 1769 unsigned LoopCost); 1770 1771 /// \brief A struct that represents some properties of the register usage 1772 /// of a loop. 1773 struct RegisterUsage { 1774 /// Holds the number of loop invariant values that are used in the loop. 1775 unsigned LoopInvariantRegs; 1776 /// Holds the maximum number of concurrent live intervals in the loop. 1777 unsigned MaxLocalUsers; 1778 /// Holds the number of instructions in the loop. 1779 unsigned NumInstructions; 1780 }; 1781 1782 /// \return Returns information about the register usages of the loop for the 1783 /// given vectorization factors. 1784 SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs); 1785 1786 /// Collect values we want to ignore in the cost model. 1787 void collectValuesToIgnore(); 1788 1789 private: 1790 /// The vectorization cost is a combination of the cost itself and a boolean 1791 /// indicating whether any of the contributing operations will actually 1792 /// operate on 1793 /// vector values after type legalization in the backend. If this latter value 1794 /// is 1795 /// false, then all operations will be scalarized (i.e. no vectorization has 1796 /// actually taken place). 1797 typedef std::pair<unsigned, bool> VectorizationCostTy; 1798 1799 /// Returns the expected execution cost. The unit of the cost does 1800 /// not matter because we use the 'cost' units to compare different 1801 /// vector widths. The cost that is returned is *not* normalized by 1802 /// the factor width. 
1803 VectorizationCostTy expectedCost(unsigned VF); 1804 1805 /// Returns the execution time cost of an instruction for a given vector 1806 /// width. Vector width of one means scalar. 1807 VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF); 1808 1809 /// The cost-computation logic from getInstructionCost which provides 1810 /// the vector type as an output parameter. 1811 unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy); 1812 1813 /// Returns whether the instruction is a load or store and will be emitted 1814 /// as a vector operation. 1815 bool isConsecutiveLoadOrStore(Instruction *I); 1816 1817 /// Report an analysis message to assist the user in diagnosing loops that are 1818 /// not vectorized. These are handled as LoopAccessReport rather than 1819 /// VectorizationReport because the << operator of VectorizationReport returns 1820 /// LoopAccessReport. 1821 void emitAnalysis(const LoopAccessReport &Message) const { 1822 emitAnalysisDiag(TheLoop, *Hints, *ORE, Message); 1823 } 1824 1825 public: 1826 /// Map of scalar integer values to the smallest bitwidth they can be legally 1827 /// represented as. The vector equivalents of these values should be truncated 1828 /// to this type. 1829 MapVector<Instruction *, uint64_t> MinBWs; 1830 1831 /// The loop that we evaluate. 1832 Loop *TheLoop; 1833 /// Predicated scalar evolution analysis. 1834 PredicatedScalarEvolution &PSE; 1835 /// Loop Info analysis. 1836 LoopInfo *LI; 1837 /// Vectorization legality. 1838 LoopVectorizationLegality *Legal; 1839 /// Vector target information. 1840 const TargetTransformInfo &TTI; 1841 /// Target Library Info. 1842 const TargetLibraryInfo *TLI; 1843 /// Demanded bits analysis. 1844 DemandedBits *DB; 1845 /// Assumption cache. 1846 AssumptionCache *AC; 1847 /// Interface to emit optimization remarks. 1848 OptimizationRemarkEmitter *ORE; 1849 1850 const Function *TheFunction; 1851 /// Loop Vectorize Hint. 1852 const LoopVectorizeHints *Hints; 1853 /// Values to ignore in the cost model. 1854 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1855 /// Values to ignore in the cost model when VF > 1. 1856 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1857 }; 1858 1859 /// \brief This holds vectorization requirements that must be verified late in 1860 /// the process. The requirements are set by the legality analysis and the 1861 /// cost model. Once vectorization has been determined to be possible and 1862 /// profitable, the requirements can be verified by looking for metadata or 1863 /// compiler options. For example, some loops require FP commutativity which is 1864 /// only allowed if vectorization is explicitly specified or if the fast-math 1865 /// compiler option has been provided. 1866 /// Late evaluation of these requirements allows helpful diagnostics to be 1867 /// composed that tell the user what needs to be done to vectorize the loop. For 1868 /// example, by specifying #pragma clang loop vectorize or -ffast-math. Late 1869 /// evaluation should be used only when diagnostics can be generated that can be 1870 /// followed by a non-expert user. 1871 class LoopVectorizationRequirements { 1872 public: 1873 LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE) 1874 : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr), ORE(ORE) {} 1875 1876 void addUnsafeAlgebraInst(Instruction *I) { 1877 // First unsafe algebra instruction.
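// (Only the first offending instruction is recorded; doesNotMeet() below
// uses it to emit a diagnostic when reordering is not permitted.)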
1878 if (!UnsafeAlgebraInst) 1879 UnsafeAlgebraInst = I; 1880 } 1881 1882 void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; } 1883 1884 bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) { 1885 const char *Name = Hints.vectorizeAnalysisPassName(); 1886 bool Failed = false; 1887 if (UnsafeAlgebraInst && !Hints.allowReordering()) { 1888 ORE.emitOptimizationRemarkAnalysisFPCommute( 1889 Name, UnsafeAlgebraInst->getDebugLoc(), 1890 UnsafeAlgebraInst->getParent(), 1891 VectorizationReport() << "cannot prove it is safe to reorder " 1892 "floating-point operations"); 1893 Failed = true; 1894 } 1895 1896 // Test if runtime memcheck thresholds are exceeded. 1897 bool PragmaThresholdReached = 1898 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 1899 bool ThresholdReached = 1900 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 1901 if ((ThresholdReached && !Hints.allowReordering()) || 1902 PragmaThresholdReached) { 1903 ORE.emitOptimizationRemarkAnalysisAliasing( 1904 Name, L, 1905 VectorizationReport() 1906 << "cannot prove it is safe to reorder memory operations"); 1907 DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 1908 Failed = true; 1909 } 1910 1911 return Failed; 1912 } 1913 1914 private: 1915 unsigned NumRuntimePointerChecks; 1916 Instruction *UnsafeAlgebraInst; 1917 1918 /// Interface to emit optimization remarks. 1919 OptimizationRemarkEmitter &ORE; 1920 }; 1921 1922 static void addAcyclicInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) { 1923 if (L.empty()) { 1924 if (!hasCyclesInLoopBody(L)) 1925 V.push_back(&L); 1926 return; 1927 } 1928 for (Loop *InnerL : L) 1929 addAcyclicInnerLoop(*InnerL, V); 1930 } 1931 1932 /// The LoopVectorize Pass. 1933 struct LoopVectorize : public FunctionPass { 1934 /// Pass identification, replacement for typeid 1935 static char ID; 1936 1937 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) 1938 : FunctionPass(ID) { 1939 Impl.DisableUnrolling = NoUnrolling; 1940 Impl.AlwaysVectorize = AlwaysVectorize; 1941 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 1942 } 1943 1944 LoopVectorizePass Impl; 1945 1946 bool runOnFunction(Function &F) override { 1947 if (skipFunction(F)) 1948 return false; 1949 1950 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 1951 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1952 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1953 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1954 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 1955 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 1956 auto *TLI = TLIP ? 
&TLIP->getTLI() : nullptr; 1957 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 1958 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1959 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 1960 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 1961 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 1962 1963 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 1964 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 1965 1966 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 1967 GetLAA, *ORE); 1968 } 1969 1970 void getAnalysisUsage(AnalysisUsage &AU) const override { 1971 AU.addRequired<AssumptionCacheTracker>(); 1972 AU.addRequiredID(LoopSimplifyID); 1973 AU.addRequiredID(LCSSAID); 1974 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 1975 AU.addRequired<DominatorTreeWrapperPass>(); 1976 AU.addRequired<LoopInfoWrapperPass>(); 1977 AU.addRequired<ScalarEvolutionWrapperPass>(); 1978 AU.addRequired<TargetTransformInfoWrapperPass>(); 1979 AU.addRequired<AAResultsWrapperPass>(); 1980 AU.addRequired<LoopAccessLegacyAnalysis>(); 1981 AU.addRequired<DemandedBitsWrapperPass>(); 1982 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 1983 AU.addPreserved<LoopInfoWrapperPass>(); 1984 AU.addPreserved<DominatorTreeWrapperPass>(); 1985 AU.addPreserved<BasicAAWrapperPass>(); 1986 AU.addPreserved<GlobalsAAWrapperPass>(); 1987 } 1988 }; 1989 1990 } // end anonymous namespace 1991 1992 //===----------------------------------------------------------------------===// 1993 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 1994 // LoopVectorizationCostModel. 1995 //===----------------------------------------------------------------------===// 1996 1997 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 1998 // We need to place the broadcast of invariant variables outside the loop. 1999 Instruction *Instr = dyn_cast<Instruction>(V); 2000 bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody); 2001 bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr; 2002 2003 // Place the code for broadcasting invariant variables in the new preheader. 2004 IRBuilder<>::InsertPointGuard Guard(Builder); 2005 if (Invariant) 2006 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2007 2008 // Broadcast the scalar into all locations in the vector. 
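// E.g. for VF = 4, an i32 value %x becomes a <4 x i32> with %x in every lane.
// IRBuilder typically lowers this to an insertelement into lane 0 followed by
// a shufflevector with an all-zero mask (a sketch of the usual lowering).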
2009 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2010 2011 return Shuf; 2012 } 2013 2014 void InnerLoopVectorizer::createVectorIntInductionPHI( 2015 const InductionDescriptor &II, VectorParts &Entry, IntegerType *TruncType) { 2016 Value *Start = II.getStartValue(); 2017 ConstantInt *Step = II.getConstIntStepValue(); 2018 assert(Step && "Can not widen an IV with a non-constant step"); 2019 2020 // Construct the initial value of the vector IV in the vector loop preheader 2021 auto CurrIP = Builder.saveIP(); 2022 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2023 if (TruncType) { 2024 Step = ConstantInt::getSigned(TruncType, Step->getSExtValue()); 2025 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2026 } 2027 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2028 Value *SteppedStart = getStepVector(SplatStart, 0, Step); 2029 Builder.restoreIP(CurrIP); 2030 2031 Value *SplatVF = 2032 ConstantVector::getSplat(VF, ConstantInt::getSigned(Start->getType(), 2033 VF * Step->getSExtValue())); 2034 // We may need to add the step a number of times, depending on the unroll 2035 // factor. The last of those goes into the PHI. 2036 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2037 &*LoopVectorBody->getFirstInsertionPt()); 2038 Instruction *LastInduction = VecInd; 2039 for (unsigned Part = 0; Part < UF; ++Part) { 2040 Entry[Part] = LastInduction; 2041 LastInduction = cast<Instruction>( 2042 Builder.CreateAdd(LastInduction, SplatVF, "step.add")); 2043 } 2044 2045 // Move the last step to the end of the latch block. This ensures consistent 2046 // placement of all induction updates. 2047 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2048 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2049 auto *ICmp = cast<Instruction>(Br->getCondition()); 2050 LastInduction->moveBefore(ICmp); 2051 LastInduction->setName("vec.ind.next"); 2052 2053 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2054 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2055 } 2056 2057 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2058 if (Legal->isScalarAfterVectorization(IV)) 2059 return true; 2060 auto isScalarInst = [&](User *U) -> bool { 2061 auto *I = cast<Instruction>(U); 2062 return (OrigLoop->contains(I) && Legal->isScalarAfterVectorization(I)); 2063 }; 2064 return any_of(IV->users(), isScalarInst); 2065 } 2066 2067 void InnerLoopVectorizer::widenIntInduction(PHINode *IV, VectorParts &Entry, 2068 TruncInst *Trunc) { 2069 2070 auto II = Legal->getInductionVars()->find(IV); 2071 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 2072 2073 auto ID = II->second; 2074 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2075 2076 // If a truncate instruction was provided, get the smaller type. 2077 auto *TruncType = Trunc ? cast<IntegerType>(Trunc->getType()) : nullptr; 2078 2079 // The scalar value to broadcast. This will be derived from the canonical 2080 // induction variable. 2081 Value *ScalarIV = nullptr; 2082 2083 // The step of the induction. 2084 Value *Step = nullptr; 2085 2086 // The value from the original loop to which we are mapping the new induction 2087 // variable. 2088 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2089 2090 // True if we have vectorized the induction variable. 2091 auto VectorizedIV = false; 2092 2093 // Determine if we want a scalar version of the induction variable. 
This is 2094 // true if the induction variable itself is not widened, or if it has at 2095 // least one user in the loop that is not widened. 2096 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 2097 2098 // If the induction variable has a constant integer step value, go ahead and 2099 // get it now. 2100 if (ID.getConstIntStepValue()) 2101 Step = ID.getConstIntStepValue(); 2102 2103 // Try to create a new independent vector induction variable. If we can't 2104 // create the phi node, we will splat the scalar induction variable in each 2105 // loop iteration. 2106 if (VF > 1 && IV->getType() == Induction->getType() && Step && 2107 !Legal->isScalarAfterVectorization(EntryVal)) { 2108 createVectorIntInductionPHI(ID, Entry, TruncType); 2109 VectorizedIV = true; 2110 } 2111 2112 // If we haven't yet vectorized the induction variable, or if we will create 2113 // a scalar one, we need to define the scalar induction variable and step 2114 // values. If we were given a truncation type, truncate the canonical 2115 // induction variable and constant step. Otherwise, derive these values from 2116 // the induction descriptor. 2117 if (!VectorizedIV || NeedsScalarIV) { 2118 if (TruncType) { 2119 assert(Step && "Truncation requires constant integer step"); 2120 auto StepInt = cast<ConstantInt>(Step)->getSExtValue(); 2121 ScalarIV = Builder.CreateCast(Instruction::Trunc, Induction, TruncType); 2122 Step = ConstantInt::getSigned(TruncType, StepInt); 2123 } else { 2124 ScalarIV = Induction; 2125 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2126 if (IV != OldInduction) { 2127 ScalarIV = Builder.CreateSExtOrTrunc(ScalarIV, IV->getType()); 2128 ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL); 2129 ScalarIV->setName("offset.idx"); 2130 } 2131 if (!Step) { 2132 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2133 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 2134 &*Builder.GetInsertPoint()); 2135 } 2136 } 2137 } 2138 2139 // If we haven't yet vectorized the induction variable, splat the scalar 2140 // induction variable, and build the necessary step vectors. 2141 if (!VectorizedIV) { 2142 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2143 for (unsigned Part = 0; Part < UF; ++Part) 2144 Entry[Part] = getStepVector(Broadcasted, VF * Part, Step); 2145 } 2146 2147 // If an induction variable is only used for counting loop iterations or 2148 // calculating addresses, it doesn't need to be widened. Create scalar steps 2149 // that can be used by instructions we will later scalarize. Note that the 2150 // addition of the scalar steps will not increase the number of instructions 2151 // in the loop in the common case prior to InstCombine. We will be trading 2152 // one vector extract for each scalar step. 2153 if (NeedsScalarIV) 2154 buildScalarSteps(ScalarIV, Step, EntryVal); 2155 } 2156 2157 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2158 Instruction::BinaryOps BinOp) { 2159 // Create and check the types. 2160 assert(Val->getType()->isVectorTy() && "Must be a vector"); 2161 int VLen = Val->getType()->getVectorNumElements(); 2162 2163 Type *STy = Val->getType()->getScalarType(); 2164 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2165 "Induction Step must be an integer or FP"); 2166 assert(Step->getType() == STy && "Step has wrong type"); 2167 2168 SmallVector<Constant *, 8> Indices; 2169 2170 if (STy->isIntegerTy()) { 2171 // Create a vector of consecutive numbers from zero to VF. 
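// (Worked example: for VLen = 4, StartIdx = 0 and Step = 2, the code below
// builds Cv = <0, 1, 2, 3>, splats the step to <2, 2, 2, 2>, and finally
// computes Val + <0, 2, 4, 6>.)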
2172 for (int i = 0; i < VLen; ++i) 2173 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 2174 2175 // Add the consecutive indices to the vector value. 2176 Constant *Cv = ConstantVector::get(Indices); 2177 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 2178 Step = Builder.CreateVectorSplat(VLen, Step); 2179 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2180 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2181 // which can be found from the original scalar operations. 2182 Step = Builder.CreateMul(Cv, Step); 2183 return Builder.CreateAdd(Val, Step, "induction"); 2184 } 2185 2186 // Floating point induction. 2187 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2188 "Binary Opcode should be specified for FP induction"); 2189 // Create a vector of consecutive numbers from zero to VF. 2190 for (int i = 0; i < VLen; ++i) 2191 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2192 2193 // Add the consecutive indices to the vector value. 2194 Constant *Cv = ConstantVector::get(Indices); 2195 2196 Step = Builder.CreateVectorSplat(VLen, Step); 2197 2198 // Floating point operations had to be 'fast' to enable the induction. 2199 FastMathFlags Flags; 2200 Flags.setUnsafeAlgebra(); 2201 2202 Value *MulOp = Builder.CreateFMul(Cv, Step); 2203 if (isa<Instruction>(MulOp)) 2204 // Have to check, MulOp may be a constant 2205 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2206 2207 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2208 if (isa<Instruction>(BOp)) 2209 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2210 return BOp; 2211 } 2212 2213 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2214 Value *EntryVal) { 2215 2216 // We shouldn't have to build scalar steps if we aren't vectorizing. 2217 assert(VF > 1 && "VF should be greater than one"); 2218 2219 // Get the value type and ensure it and the step have the same integer type. 2220 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2221 assert(ScalarIVTy->isIntegerTy() && ScalarIVTy == Step->getType() && 2222 "Val and Step should have the same integer type"); 2223 2224 // Compute the scalar steps and save the results in ScalarIVMap. 2225 for (unsigned Part = 0; Part < UF; ++Part) 2226 for (unsigned I = 0; I < VF; ++I) { 2227 auto *StartIdx = ConstantInt::get(ScalarIVTy, VF * Part + I); 2228 auto *Mul = Builder.CreateMul(StartIdx, Step); 2229 auto *Add = Builder.CreateAdd(ScalarIV, Mul); 2230 ScalarIVMap[EntryVal].push_back(Add); 2231 } 2232 } 2233 2234 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { 2235 assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr"); 2236 auto *SE = PSE.getSE(); 2237 // Make sure that the pointer does not point to structs. 2238 if (Ptr->getType()->getPointerElementType()->isAggregateType()) 2239 return 0; 2240 2241 // If this value is a pointer induction variable, we know it is consecutive. 2242 PHINode *Phi = dyn_cast_or_null<PHINode>(Ptr); 2243 if (Phi && Inductions.count(Phi)) { 2244 InductionDescriptor II = Inductions[Phi]; 2245 return II.getConsecutiveDirection(); 2246 } 2247 2248 GetElementPtrInst *Gep = getGEPInstruction(Ptr); 2249 if (!Gep) 2250 return 0; 2251 2252 unsigned NumOperands = Gep->getNumOperands(); 2253 Value *GpPtr = Gep->getPointerOperand(); 2254 // If this GEP value is a consecutive pointer induction variable and all of 2255 // the indices are constant, then we know it is consecutive. 
2256 Phi = dyn_cast<PHINode>(GpPtr); 2257 if (Phi && Inductions.count(Phi)) { 2258 2259 // Make sure that the pointer does not point to structs. 2260 PointerType *GepPtrType = cast<PointerType>(GpPtr->getType()); 2261 if (GepPtrType->getElementType()->isAggregateType()) 2262 return 0; 2263 2264 // Make sure that all of the index operands are loop invariant. 2265 for (unsigned i = 1; i < NumOperands; ++i) 2266 if (!SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop)) 2267 return 0; 2268 2269 InductionDescriptor II = Inductions[Phi]; 2270 return II.getConsecutiveDirection(); 2271 } 2272 2273 unsigned InductionOperand = getGEPInductionOperand(Gep); 2274 2275 // Check that all of the gep indices are uniform except for our induction 2276 // operand. 2277 for (unsigned i = 0; i != NumOperands; ++i) 2278 if (i != InductionOperand && 2279 !SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop)) 2280 return 0; 2281 2282 // We can emit wide load/stores only if the last non-zero index is the 2283 // induction variable. 2284 const SCEV *Last = nullptr; 2285 if (!getSymbolicStrides() || !getSymbolicStrides()->count(Gep)) 2286 Last = PSE.getSCEV(Gep->getOperand(InductionOperand)); 2287 else { 2288 // Because of the multiplication by a stride we can have a s/zext cast. 2289 // We are going to replace this stride by 1 so the cast is safe to ignore. 2290 // 2291 // %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] 2292 // %0 = trunc i64 %indvars.iv to i32 2293 // %mul = mul i32 %0, %Stride1 2294 // %idxprom = zext i32 %mul to i64 << Safe cast. 2295 // %arrayidx = getelementptr inbounds i32* %B, i64 %idxprom 2296 // 2297 Last = replaceSymbolicStrideSCEV(PSE, *getSymbolicStrides(), 2298 Gep->getOperand(InductionOperand), Gep); 2299 if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(Last)) 2300 Last = 2301 (C->getSCEVType() == scSignExtend || C->getSCEVType() == scZeroExtend) 2302 ? C->getOperand() 2303 : Last; 2304 } 2305 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Last)) { 2306 const SCEV *Step = AR->getStepRecurrence(*SE); 2307 2308 // The memory is consecutive because the last index is consecutive 2309 // and all other indices are loop invariant. 2310 if (Step->isOne()) 2311 return 1; 2312 if (Step->isAllOnesValue()) 2313 return -1; 2314 } 2315 2316 return 0; 2317 } 2318 2319 bool LoopVectorizationLegality::isUniform(Value *V) { 2320 return LAI->isUniform(V); 2321 } 2322 2323 InnerLoopVectorizer::VectorParts & 2324 InnerLoopVectorizer::getVectorValue(Value *V) { 2325 assert(V != Induction && "The new induction variable should not be used."); 2326 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2327 2328 // If we have a stride that is replaced by one, do it here. 2329 if (Legal->hasStride(V)) 2330 V = ConstantInt::get(V->getType(), 1); 2331 2332 // If we have this scalar in the map, return it. 2333 if (WidenMap.has(V)) 2334 return WidenMap.get(V); 2335 2336 // If this scalar is unknown, assume that it is a constant or that it is 2337 // loop invariant. Broadcast V and save the value for future uses. 
2338 Value *B = getBroadcastInstrs(V); 2339 return WidenMap.splat(V, B); 2340 } 2341 2342 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2343 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2344 SmallVector<Constant *, 8> ShuffleMask; 2345 for (unsigned i = 0; i < VF; ++i) 2346 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 2347 2348 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2349 ConstantVector::get(ShuffleMask), 2350 "reverse"); 2351 } 2352 2353 // Get a mask to interleave \p NumVec vectors into a wide vector. 2354 // I.e. <0, VF, VF*2, ..., VF*(NumVec-1), 1, VF+1, VF*2+1, ...> 2355 // E.g. For 2 interleaved vectors, if VF is 4, the mask is: 2356 // <0, 4, 1, 5, 2, 6, 3, 7> 2357 static Constant *getInterleavedMask(IRBuilder<> &Builder, unsigned VF, 2358 unsigned NumVec) { 2359 SmallVector<Constant *, 16> Mask; 2360 for (unsigned i = 0; i < VF; i++) 2361 for (unsigned j = 0; j < NumVec; j++) 2362 Mask.push_back(Builder.getInt32(j * VF + i)); 2363 2364 return ConstantVector::get(Mask); 2365 } 2366 2367 // Get the strided mask starting from index \p Start. 2368 // I.e. <Start, Start + Stride, ..., Start + Stride*(VF-1)> 2369 static Constant *getStridedMask(IRBuilder<> &Builder, unsigned Start, 2370 unsigned Stride, unsigned VF) { 2371 SmallVector<Constant *, 16> Mask; 2372 for (unsigned i = 0; i < VF; i++) 2373 Mask.push_back(Builder.getInt32(Start + i * Stride)); 2374 2375 return ConstantVector::get(Mask); 2376 } 2377 2378 // Get a mask of two parts: the first part consists of sequential integers 2379 // starting from 0, and the second part consists of UNDEFs. 2380 // I.e. <0, 1, 2, ..., NumInt - 1, undef, ..., undef> 2381 static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned NumInt, 2382 unsigned NumUndef) { 2383 SmallVector<Constant *, 16> Mask; 2384 for (unsigned i = 0; i < NumInt; i++) 2385 Mask.push_back(Builder.getInt32(i)); 2386 2387 Constant *Undef = UndefValue::get(Builder.getInt32Ty()); 2388 for (unsigned i = 0; i < NumUndef; i++) 2389 Mask.push_back(Undef); 2390 2391 return ConstantVector::get(Mask); 2392 } 2393 2394 // Concatenate two vectors with the same element type. The 2nd vector should 2395 // not have more elements than the 1st vector. If the 2nd vector has fewer 2396 // elements, extend it with UNDEFs. 2397 static Value *ConcatenateTwoVectors(IRBuilder<> &Builder, Value *V1, 2398 Value *V2) { 2399 VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType()); 2400 VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType()); 2401 assert(VecTy1 && VecTy2 && 2402 VecTy1->getScalarType() == VecTy2->getScalarType() && 2403 "Expect two vectors with the same element type"); 2404 2405 unsigned NumElts1 = VecTy1->getNumElements(); 2406 unsigned NumElts2 = VecTy2->getNumElements(); 2407 assert(NumElts1 >= NumElts2 && "The first vector should not have fewer elements"); 2408 2409 if (NumElts1 > NumElts2) { 2410 // Extend with UNDEFs. 2411 Constant *ExtMask = 2412 getSequentialMask(Builder, NumElts2, NumElts1 - NumElts2); 2413 V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask); 2414 } 2415 2416 Constant *Mask = getSequentialMask(Builder, NumElts1 + NumElts2, 0); 2417 return Builder.CreateShuffleVector(V1, V2, Mask); 2418 } 2419 2420 // Concatenate vectors in the given list. All vectors have the same type.
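// E.g. four <4 x i32> inputs are reduced pairwise: the first round of
// shuffles produces two <8 x i32> values, and a second round produces the
// final <16 x i32> result (an illustration of the loop below).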
2421 static Value *ConcatenateVectors(IRBuilder<> &Builder, 2422 ArrayRef<Value *> InputList) { 2423 unsigned NumVec = InputList.size(); 2424 assert(NumVec > 1 && "Should be at least two vectors"); 2425 2426 SmallVector<Value *, 8> ResList; 2427 ResList.append(InputList.begin(), InputList.end()); 2428 do { 2429 SmallVector<Value *, 8> TmpList; 2430 for (unsigned i = 0; i < NumVec - 1; i += 2) { 2431 Value *V0 = ResList[i], *V1 = ResList[i + 1]; 2432 assert((V0->getType() == V1->getType() || i == NumVec - 2) && 2433 "Only the last vector may have a different type"); 2434 2435 TmpList.push_back(ConcatenateTwoVectors(Builder, V0, V1)); 2436 } 2437 2438 // Push the last vector if the total number of vectors is odd. 2439 if (NumVec % 2 != 0) 2440 TmpList.push_back(ResList[NumVec - 1]); 2441 2442 ResList = TmpList; 2443 NumVec = ResList.size(); 2444 } while (NumVec > 1); 2445 2446 return ResList[0]; 2447 } 2448 2449 // Try to vectorize the interleave group that \p Instr belongs to. 2450 // 2451 // E.g. Translate following interleaved load group (factor = 3): 2452 // for (i = 0; i < N; i+=3) { 2453 // R = Pic[i]; // Member of index 0 2454 // G = Pic[i+1]; // Member of index 1 2455 // B = Pic[i+2]; // Member of index 2 2456 // ... // do something to R, G, B 2457 // } 2458 // To: 2459 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2460 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements 2461 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements 2462 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements 2463 // 2464 // Or translate following interleaved store group (factor = 3): 2465 // for (i = 0; i < N; i+=3) { 2466 // ... do something to R, G, B 2467 // Pic[i] = R; // Member of index 0 2468 // Pic[i+1] = G; // Member of index 1 2469 // Pic[i+2] = B; // Member of index 2 2470 // } 2471 // To: 2472 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2473 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u> 2474 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2475 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2476 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2477 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) { 2478 const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr); 2479 assert(Group && "Fail to get an interleaved access group."); 2480 2481 // Skip if current instruction is not the insert position. 2482 if (Instr != Group->getInsertPos()) 2483 return; 2484 2485 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2486 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2487 Value *Ptr = getPointerOperand(Instr); 2488 2489 // Prepare for the vector type of the interleaved load/store. 2490 Type *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 2491 unsigned InterleaveFactor = Group->getFactor(); 2492 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF); 2493 Type *PtrTy = VecTy->getPointerTo(Ptr->getType()->getPointerAddressSpace()); 2494 2495 // Prepare for the new pointers. 2496 setDebugLocFromInst(Builder, Ptr); 2497 VectorParts &PtrParts = getVectorValue(Ptr); 2498 SmallVector<Value *, 2> NewPtrs; 2499 unsigned Index = Group->getIndex(Instr); 2500 for (unsigned Part = 0; Part < UF; Part++) { 2501 // Extract the pointer for current instruction from the pointer vector. A 2502 // reverse access uses the pointer in the last lane. 2503 Value *NewPtr = Builder.CreateExtractElement( 2504 PtrParts[Part], 2505 Group->isReverse() ? 
Builder.getInt32(VF - 1) : Builder.getInt32(0)); 2506 2507 // Notice that the current instruction could be at any index in the group. We 2508 // need to adjust the address to the member of index 0. 2509 // 2510 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2511 // b = A[i]; // Member of index 0 2512 // The current pointer points to A[i+1]; adjust it to A[i]. 2513 // 2514 // E.g. A[i+1] = a; // Member of index 1 2515 // A[i] = b; // Member of index 0 2516 // A[i+2] = c; // Member of index 2 (Current instruction) 2517 // The current pointer points to A[i+2]; adjust it to A[i]. 2518 NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index)); 2519 2520 // Cast to the vector pointer type. 2521 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy)); 2522 } 2523 2524 setDebugLocFromInst(Builder, Instr); 2525 Value *UndefVec = UndefValue::get(VecTy); 2526 2527 // Vectorize the interleaved load group. 2528 if (LI) { 2529 for (unsigned Part = 0; Part < UF; Part++) { 2530 Instruction *NewLoadInstr = Builder.CreateAlignedLoad( 2531 NewPtrs[Part], Group->getAlignment(), "wide.vec"); 2532 2533 for (unsigned i = 0; i < InterleaveFactor; i++) { 2534 Instruction *Member = Group->getMember(i); 2535 2536 // Skip the gaps in the group. 2537 if (!Member) 2538 continue; 2539 2540 Constant *StrideMask = getStridedMask(Builder, i, InterleaveFactor, VF); 2541 Value *StridedVec = Builder.CreateShuffleVector( 2542 NewLoadInstr, UndefVec, StrideMask, "strided.vec"); 2543 2544 // If this member has a different type, cast the result type. 2545 if (Member->getType() != ScalarTy) { 2546 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2547 StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy); 2548 } 2549 2550 VectorParts &Entry = WidenMap.get(Member); 2551 Entry[Part] = 2552 Group->isReverse() ? reverseVector(StridedVec) : StridedVec; 2553 } 2554 2555 addMetadata(NewLoadInstr, Instr); 2556 } 2557 return; 2558 } 2559 2560 // The sub vector type for the current instruction. 2561 VectorType *SubVT = VectorType::get(ScalarTy, VF); 2562 2563 // Vectorize the interleaved store group. 2564 for (unsigned Part = 0; Part < UF; Part++) { 2565 // Collect the stored vector from each member. 2566 SmallVector<Value *, 4> StoredVecs; 2567 for (unsigned i = 0; i < InterleaveFactor; i++) { 2568 // An interleaved store group doesn't allow a gap, so each index has a member. 2569 Instruction *Member = Group->getMember(i); 2570 assert(Member && "Failed to get a member from an interleaved store group"); 2571 2572 Value *StoredVec = 2573 getVectorValue(cast<StoreInst>(Member)->getValueOperand())[Part]; 2574 if (Group->isReverse()) 2575 StoredVec = reverseVector(StoredVec); 2576 2577 // If this member has a different type, cast it to a unified type. 2578 if (StoredVec->getType() != SubVT) 2579 StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT); 2580 2581 StoredVecs.push_back(StoredVec); 2582 } 2583 2584 // Concatenate all vectors into a wide vector. 2585 Value *WideVec = ConcatenateVectors(Builder, StoredVecs); 2586 2587 // Interleave the elements in the wide vector. 2588 Constant *IMask = getInterleavedMask(Builder, VF, InterleaveFactor); 2589 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask, 2590 "interleaved.vec"); 2591 2592 Instruction *NewStoreInstr = 2593 Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment()); 2594 addMetadata(NewStoreInstr, Instr); 2595 } 2596 } 2597 2598 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) { 2599 // Attempt to issue a wide load.
2600 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2601 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2602 2603 assert((LI || SI) && "Invalid Load/Store instruction"); 2604 2605 // Try to vectorize the interleave group if this access is interleaved. 2606 if (Legal->isAccessInterleaved(Instr)) 2607 return vectorizeInterleaveGroup(Instr); 2608 2609 Type *ScalarDataTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 2610 Type *DataTy = VectorType::get(ScalarDataTy, VF); 2611 Value *Ptr = getPointerOperand(Instr); 2612 unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment(); 2613 // An alignment of 0 means target abi alignment. We need to use the scalar's 2614 // target abi alignment in such a case. 2615 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2616 if (!Alignment) 2617 Alignment = DL.getABITypeAlignment(ScalarDataTy); 2618 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2619 uint64_t ScalarAllocatedSize = DL.getTypeAllocSize(ScalarDataTy); 2620 uint64_t VectorElementSize = DL.getTypeStoreSize(DataTy) / VF; 2621 2622 if (SI && Legal->blockNeedsPredication(SI->getParent()) && 2623 !Legal->isMaskRequired(SI)) 2624 return scalarizeInstruction(Instr, true); 2625 2626 if (ScalarAllocatedSize != VectorElementSize) 2627 return scalarizeInstruction(Instr); 2628 2629 // If the pointer is loop invariant scalarize the load. 2630 if (LI && Legal->isUniform(Ptr)) 2631 return scalarizeInstruction(Instr); 2632 2633 // If the pointer is non-consecutive and gather/scatter is not supported 2634 // scalarize the instruction. 2635 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 2636 bool Reverse = ConsecutiveStride < 0; 2637 bool CreateGatherScatter = 2638 !ConsecutiveStride && ((LI && Legal->isLegalMaskedGather(ScalarDataTy)) || 2639 (SI && Legal->isLegalMaskedScatter(ScalarDataTy))); 2640 2641 if (!ConsecutiveStride && !CreateGatherScatter) 2642 return scalarizeInstruction(Instr); 2643 2644 Constant *Zero = Builder.getInt32(0); 2645 VectorParts &Entry = WidenMap.get(Instr); 2646 VectorParts VectorGep; 2647 2648 // Handle consecutive loads/stores. 2649 GetElementPtrInst *Gep = getGEPInstruction(Ptr); 2650 if (ConsecutiveStride) { 2651 if (Gep && Legal->isInductionVariable(Gep->getPointerOperand())) { 2652 setDebugLocFromInst(Builder, Gep); 2653 Value *PtrOperand = Gep->getPointerOperand(); 2654 Value *FirstBasePtr = getVectorValue(PtrOperand)[0]; 2655 FirstBasePtr = Builder.CreateExtractElement(FirstBasePtr, Zero); 2656 2657 // Create the new GEP with the new induction variable. 2658 GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone()); 2659 Gep2->setOperand(0, FirstBasePtr); 2660 Gep2->setName("gep.indvar.base"); 2661 Ptr = Builder.Insert(Gep2); 2662 } else if (Gep) { 2663 setDebugLocFromInst(Builder, Gep); 2664 assert(PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getPointerOperand()), 2665 OrigLoop) && 2666 "Base ptr must be invariant"); 2667 // The last index does not have to be the induction. It can be 2668 // consecutive and be a function of the index. For example A[I+1]; 2669 unsigned NumOperands = Gep->getNumOperands(); 2670 unsigned InductionOperand = getGEPInductionOperand(Gep); 2671 // Create the new GEP with the new induction variable. 
2672 GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone()); 2673 2674 for (unsigned i = 0; i < NumOperands; ++i) { 2675 Value *GepOperand = Gep->getOperand(i); 2676 Instruction *GepOperandInst = dyn_cast<Instruction>(GepOperand); 2677 2678 // Update last index or loop invariant instruction anchored in loop. 2679 if (i == InductionOperand || 2680 (GepOperandInst && OrigLoop->contains(GepOperandInst))) { 2681 assert((i == InductionOperand || 2682 PSE.getSE()->isLoopInvariant(PSE.getSCEV(GepOperandInst), 2683 OrigLoop)) && 2684 "Must be last index or loop invariant"); 2685 2686 VectorParts &GEPParts = getVectorValue(GepOperand); 2687 2688 // If GepOperand is an induction variable, and there's a scalarized 2689 // version of it available, use it. Otherwise, we will need to create 2690 // an extractelement instruction. 2691 Value *Index = ScalarIVMap.count(GepOperand) 2692 ? ScalarIVMap[GepOperand][0] 2693 : Builder.CreateExtractElement(GEPParts[0], Zero); 2694 2695 Gep2->setOperand(i, Index); 2696 Gep2->setName("gep.indvar.idx"); 2697 } 2698 } 2699 Ptr = Builder.Insert(Gep2); 2700 } else { // No GEP 2701 // Use the induction element ptr. 2702 assert(isa<PHINode>(Ptr) && "Invalid induction ptr"); 2703 setDebugLocFromInst(Builder, Ptr); 2704 VectorParts &PtrVal = getVectorValue(Ptr); 2705 Ptr = Builder.CreateExtractElement(PtrVal[0], Zero); 2706 } 2707 } else { 2708 // At this point we should have a vector version of the GEP for gather or scatter. 2709 assert(CreateGatherScatter && "The instruction should be scalarized"); 2710 if (Gep) { 2711 // Vectorize the GEP across UF parts. We want a vector value for the base 2712 // and for each index that's defined inside the loop, even if it is 2713 // loop-invariant but wasn't hoisted out. Otherwise we want to keep them 2714 // scalar. 2715 SmallVector<VectorParts, 4> OpsV; 2716 for (Value *Op : Gep->operands()) { 2717 Instruction *SrcInst = dyn_cast<Instruction>(Op); 2718 if (SrcInst && OrigLoop->contains(SrcInst)) 2719 OpsV.push_back(getVectorValue(Op)); 2720 else 2721 OpsV.push_back(VectorParts(UF, Op)); 2722 } 2723 for (unsigned Part = 0; Part < UF; ++Part) { 2724 SmallVector<Value *, 4> Ops; 2725 Value *GEPBasePtr = OpsV[0][Part]; 2726 for (unsigned i = 1; i < Gep->getNumOperands(); i++) 2727 Ops.push_back(OpsV[i][Part]); 2728 Value *NewGep = Builder.CreateGEP(GEPBasePtr, Ops, "VectorGep"); 2729 cast<GetElementPtrInst>(NewGep)->setIsInBounds(Gep->isInBounds()); 2730 assert(NewGep->getType()->isVectorTy() && "Expected vector GEP"); 2731 2732 NewGep = 2733 Builder.CreateBitCast(NewGep, VectorType::get(Ptr->getType(), VF)); 2734 VectorGep.push_back(NewGep); 2735 } 2736 } else 2737 VectorGep = getVectorValue(Ptr); 2738 } 2739 2740 VectorParts Mask = createBlockInMask(Instr->getParent()); 2741 // Handle Stores: 2742 if (SI) { 2743 assert(!Legal->isUniform(SI->getPointerOperand()) && 2744 "We do not allow storing to uniform addresses"); 2745 setDebugLocFromInst(Builder, SI); 2746 // We don't want to update the value in the map as it might be used in 2747 // another expression. So don't use a reference type for "StoredVal". 2748 VectorParts StoredVal = getVectorValue(SI->getValueOperand()); 2749 2750 for (unsigned Part = 0; Part < UF; ++Part) { 2751 Instruction *NewSI = nullptr; 2752 if (CreateGatherScatter) { 2753 Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr; 2754 NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part], 2755 Alignment, MaskPart); 2756 } else { 2757 // Calculate the pointer for the specific unroll-part.
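// (E.g. with VF = 4 and UF = 2, part 0 addresses Ptr + 0 and part 1
// addresses Ptr + 4, assuming a forward, consecutive access.)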
2758 Value *PartPtr = 2759 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 2760 2761 if (Reverse) { 2762 // If we store to reverse consecutive memory locations, then we need 2763 // to reverse the order of elements in the stored value. 2764 StoredVal[Part] = reverseVector(StoredVal[Part]); 2765 // If the address is consecutive but reversed, then the 2766 // wide store needs to start at the last vector element. 2767 PartPtr = 2768 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 2769 PartPtr = 2770 Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 2771 Mask[Part] = reverseVector(Mask[Part]); 2772 } 2773 2774 Value *VecPtr = 2775 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2776 2777 if (Legal->isMaskRequired(SI)) 2778 NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment, 2779 Mask[Part]); 2780 else 2781 NewSI = 2782 Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment); 2783 } 2784 addMetadata(NewSI, SI); 2785 } 2786 return; 2787 } 2788 2789 // Handle loads. 2790 assert(LI && "Must have a load instruction"); 2791 setDebugLocFromInst(Builder, LI); 2792 for (unsigned Part = 0; Part < UF; ++Part) { 2793 Instruction *NewLI; 2794 if (CreateGatherScatter) { 2795 Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr; 2796 NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart, 2797 0, "wide.masked.gather"); 2798 Entry[Part] = NewLI; 2799 } else { 2800 // Calculate the pointer for the specific unroll-part. 2801 Value *PartPtr = 2802 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 2803 2804 if (Reverse) { 2805 // If the address is consecutive but reversed, then the 2806 // wide load needs to start at the last vector element. 2807 PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 2808 PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 2809 Mask[Part] = reverseVector(Mask[Part]); 2810 } 2811 2812 Value *VecPtr = 2813 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2814 if (Legal->isMaskRequired(LI)) 2815 NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part], 2816 UndefValue::get(DataTy), 2817 "wide.masked.load"); 2818 else 2819 NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load"); 2820 Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI; 2821 } 2822 addMetadata(NewLI, LI); 2823 } 2824 } 2825 2826 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2827 bool IfPredicateStore) { 2828 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2829 // Holds vector parameters or scalars, in case of uniform vals. 2830 SmallVector<VectorParts, 4> Params; 2831 2832 setDebugLocFromInst(Builder, Instr); 2833 2834 // Find all of the vectorized parameters. 2835 for (Value *SrcOp : Instr->operands()) { 2836 // If we are accessing the old induction variable, use the new one. 2837 if (SrcOp == OldInduction) { 2838 Params.push_back(getVectorValue(SrcOp)); 2839 continue; 2840 } 2841 2842 // Try using previously calculated values. 2843 auto *SrcInst = dyn_cast<Instruction>(SrcOp); 2844 2845 // If the src is an instruction that appeared earlier in the basic block, 2846 // then it should already be vectorized. 2847 if (SrcInst && OrigLoop->contains(SrcInst)) { 2848 assert(WidenMap.has(SrcInst) && "Source operand is unavailable"); 2849 // The parameter is a vector value from earlier. 
2850       Params.push_back(WidenMap.get(SrcInst));
2851     } else {
2852       // The parameter is a scalar from outside the loop. Maybe even a constant.
2853       VectorParts Scalars;
2854       Scalars.append(UF, SrcOp);
2855       Params.push_back(Scalars);
2856     }
2857   }
2858
2859   assert(Params.size() == Instr->getNumOperands() &&
2860          "Invalid number of operands");
2861
2862   // Does this instruction return a value?
2863   bool IsVoidRetTy = Instr->getType()->isVoidTy();
2864
2865   Value *UndefVec =
2866       IsVoidRetTy ? nullptr
2867                   : UndefValue::get(VectorType::get(Instr->getType(), VF));
2868   // Create a new entry in the WidenMap and initialize it to Undef or Null.
2869   VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);
2870
2871   VectorParts Cond;
2872   if (IfPredicateStore) {
2873     assert(Instr->getParent()->getSinglePredecessor() &&
2874            "Only support single predecessor blocks");
2875     Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(),
2876                           Instr->getParent());
2877   }
2878
2879   // For each vector unroll 'part':
2880   for (unsigned Part = 0; Part < UF; ++Part) {
2881     // For each scalar that we create:
2882     for (unsigned Width = 0; Width < VF; ++Width) {
2883
2884       // Start if-block.
2885       Value *Cmp = nullptr;
2886       if (IfPredicateStore) {
2887         Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Width));
2888         Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp,
2889                                  ConstantInt::get(Cmp->getType(), 1));
2890       }
2891
2892       Instruction *Cloned = Instr->clone();
2893       if (!IsVoidRetTy)
2894         Cloned->setName(Instr->getName() + ".cloned");
2895       // Replace the operands of the cloned instruction with extracted scalars.
2896       for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
2897
2898         // If the operand is an induction variable, and there's a scalarized
2899         // version of it available, use it. Otherwise, we will need to create
2900         // an extractelement instruction if vectorizing.
2901         auto *NewOp = Params[op][Part];
2902         auto *ScalarOp = Instr->getOperand(op);
2903         if (ScalarIVMap.count(ScalarOp))
2904           NewOp = ScalarIVMap[ScalarOp][VF * Part + Width];
2905         else if (NewOp->getType()->isVectorTy())
2906           NewOp = Builder.CreateExtractElement(NewOp, Builder.getInt32(Width));
2907         Cloned->setOperand(op, NewOp);
2908       }
2909       addNewMetadata(Cloned, Instr);
2910
2911       // Place the cloned scalar in the new loop.
2912       Builder.Insert(Cloned);
2913
2914       // If we just cloned a new assumption, add it to the assumption cache.
2915       if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
2916         if (II->getIntrinsicID() == Intrinsic::assume)
2917           AC->registerAssumption(II);
2918
2919       // If the original scalar returns a value we need to place it in a vector
2920       // so that future users will be able to use it.
2921       if (!IsVoidRetTy)
2922         VecResults[Part] = Builder.CreateInsertElement(VecResults[Part], Cloned,
2923                                                        Builder.getInt32(Width));
2924       // End if-block.
2925       if (IfPredicateStore)
2926         PredicatedStores.push_back(
2927             std::make_pair(cast<StoreInst>(Cloned), Cmp));
2928     }
2929   }
2930 }
2931
2932 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
2933                                                       Value *End, Value *Step,
2934                                                       Instruction *DL) {
2935   BasicBlock *Header = L->getHeader();
2936   BasicBlock *Latch = L->getLoopLatch();
2937   // As we're just creating this loop, it's possible no latch exists
2938   // yet. If so, use the header as this will be a single block loop.
2939   if (!Latch)
2940     Latch = Header;
2941
2942   IRBuilder<> Builder(&*Header->getFirstInsertionPt());
2943   setDebugLocFromInst(Builder, getDebugLocFromInstOrOperands(OldInduction));
2944   auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
2945
2946   Builder.SetInsertPoint(Latch->getTerminator());
2947
2948   // Create i+1 and fill the PHINode.
2949   Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
2950   Induction->addIncoming(Start, L->getLoopPreheader());
2951   Induction->addIncoming(Next, Latch);
2952   // Create the compare.
2953   Value *ICmp = Builder.CreateICmpEQ(Next, End);
2954   Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
2955
2956   // Now we have two terminators. Remove the old one from the block.
2957   Latch->getTerminator()->eraseFromParent();
2958
2959   return Induction;
2960 }
2961
2962 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
2963   if (TripCount)
2964     return TripCount;
2965
2966   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
2967   // Find the loop boundaries.
2968   ScalarEvolution *SE = PSE.getSE();
2969   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
2970   assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
2971          "Invalid loop count");
2972
2973   Type *IdxTy = Legal->getWidestInductionType();
2974
2975   // The exit count might have the type of i64 while the phi is i32. This can
2976   // happen if we have an induction variable that is sign-extended before the
2977   // compare. The only way we can get a backedge-taken count in that case is if
2978   // the induction variable was signed, and as such it will not overflow, so
2979   // the truncation is legal.
2980   if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
2981       IdxTy->getPrimitiveSizeInBits())
2982     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
2983   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
2984
2985   // Get the total trip count from the backedge-taken count by adding 1.
2986   const SCEV *ExitCount = SE->getAddExpr(
2987       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
2988
2989   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
2990
2991   // Expand the trip count and place the new instructions in the preheader.
2992   // Notice that this only adds instructions to the preheader; the loop body is unchanged.
2993   SCEVExpander Exp(*SE, DL, "induction");
2994
2995   // Count holds the overall loop count (N).
2996   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
2997                                 L->getLoopPreheader()->getTerminator());
2998
2999   if (TripCount->getType()->isPointerTy())
3000     TripCount =
3001         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3002                                     L->getLoopPreheader()->getTerminator());
3003
3004   return TripCount;
3005 }
3006
3007 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3008   if (VectorTripCount)
3009     return VectorTripCount;
3010
3011   Value *TC = getOrCreateTripCount(L);
3012   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3013
3014   // Now we need to generate the expression for the part of the loop that the
3015   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3016   // iterations are not required for correctness, or N - Step, otherwise. Step
3017   // is equal to the vectorization factor (number of SIMD elements) times the
3018   // unroll factor (number of SIMD instructions).
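  // Worked example (illustrative, not from the original source): with VF = 4
  // and UF = 2, Step is 8. For a trip count N = 19 we get R = N urem 8 = 3 and
  // a vector trip count of 16, leaving 3 iterations to the scalar remainder
  // loop. If a scalar epilogue is required and N = 16, R is bumped from 0 up
  // to Step below, so the vector loop runs 8 iterations and the epilogue runs
  // the remaining 8.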
3019   Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
3020   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3021
3022   // If there is a non-reversed interleaved group that may speculatively access
3023   // memory out-of-bounds, we need to ensure that there will be at least one
3024   // iteration of the scalar epilogue loop. Thus, if the step evenly divides
3025   // the trip count, we set the remainder to be equal to the step. If the step
3026   // does not evenly divide the trip count, no adjustment is necessary since
3027   // there will already be scalar iterations. Note that the minimum iterations
3028   // check ensures that N >= Step.
3029   if (VF > 1 && Legal->requiresScalarEpilogue()) {
3030     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3031     R = Builder.CreateSelect(IsZero, Step, R);
3032   }
3033
3034   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3035
3036   return VectorTripCount;
3037 }
3038
3039 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3040                                                          BasicBlock *Bypass) {
3041   Value *Count = getOrCreateTripCount(L);
3042   BasicBlock *BB = L->getLoopPreheader();
3043   IRBuilder<> Builder(BB->getTerminator());
3044
3045   // Generate code to check that the loop's trip count, which we computed by
3046   // adding one to the backedge-taken count, will not overflow.
3047   Value *CheckMinIters = Builder.CreateICmpULT(
3048       Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");
3049
3050   BasicBlock *NewBB =
3051       BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked");
3052   // Update dominator tree immediately if the generated block is a
3053   // LoopBypassBlock because SCEV expansions to generate loop bypass
3054   // checks may query it before the current function is finished.
3055   DT->addNewBlock(NewBB, BB);
3056   if (L->getParentLoop())
3057     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3058   ReplaceInstWithInst(BB->getTerminator(),
3059                       BranchInst::Create(Bypass, NewBB, CheckMinIters));
3060   LoopBypassBlocks.push_back(BB);
3061 }
3062
3063 void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L,
3064                                                      BasicBlock *Bypass) {
3065   Value *TC = getOrCreateVectorTripCount(L);
3066   BasicBlock *BB = L->getLoopPreheader();
3067   IRBuilder<> Builder(BB->getTerminator());
3068
3069   // Now, compare the new count to zero. If it is zero, skip the vector loop
3070   // and jump to the scalar loop.
3071   Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
3072                                     "cmp.zero");
3073
3074   // Split the preheader so that we can branch around the vector loop when the
3075   // vector trip count is zero.
3076   BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3077   // Update dominator tree immediately if the generated block is a
3078   // LoopBypassBlock because SCEV expansions to generate loop bypass
3079   // checks may query it before the current function is finished.
3080   DT->addNewBlock(NewBB, BB);
3081   if (L->getParentLoop())
3082     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3083   ReplaceInstWithInst(BB->getTerminator(),
3084                       BranchInst::Create(Bypass, NewBB, Cmp));
3085   LoopBypassBlocks.push_back(BB);
3086 }
3087
3088 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3089   BasicBlock *BB = L->getLoopPreheader();
3090
3091   // Generate the code to check the SCEV assumptions that we made.
3092   // We want the new basic block to start at the first instruction in a
3093   // sequence of instructions that form a check.
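  // For example (illustrative): if vectorization assumed that a symbolic
  // stride equals one, the expanded predicate recomputes that condition at
  // runtime, and the branch created below sends execution to the scalar loop
  // whenever any such assumption fails.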
3094   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
3095                    "scev.check");
3096   Value *SCEVCheck =
3097       Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
3098
3099   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
3100     if (C->isZero())
3101       return;
3102
3103   // Create a new block containing the SCEV check.
3104   BB->setName("vector.scevcheck");
3105   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3106   // Update dominator tree immediately if the generated block is a
3107   // LoopBypassBlock because SCEV expansions to generate loop bypass
3108   // checks may query it before the current function is finished.
3109   DT->addNewBlock(NewBB, BB);
3110   if (L->getParentLoop())
3111     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3112   ReplaceInstWithInst(BB->getTerminator(),
3113                       BranchInst::Create(Bypass, NewBB, SCEVCheck));
3114   LoopBypassBlocks.push_back(BB);
3115   AddedSafetyChecks = true;
3116 }
3117
3118 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
3119   BasicBlock *BB = L->getLoopPreheader();
3120
3121   // Generate the code that checks at runtime whether arrays overlap. We put
3122   // the checks into a separate block to make the more common case of few
3123   // elements faster.
3124   Instruction *FirstCheckInst;
3125   Instruction *MemRuntimeCheck;
3126   std::tie(FirstCheckInst, MemRuntimeCheck) =
3127       Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
3128   if (!MemRuntimeCheck)
3129     return;
3130
3131   // Create a new block containing the memory check.
3132   BB->setName("vector.memcheck");
3133   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3134   // Update dominator tree immediately if the generated block is a
3135   // LoopBypassBlock because SCEV expansions to generate loop bypass
3136   // checks may query it before the current function is finished.
3137   DT->addNewBlock(NewBB, BB);
3138   if (L->getParentLoop())
3139     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3140   ReplaceInstWithInst(BB->getTerminator(),
3141                       BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
3142   LoopBypassBlocks.push_back(BB);
3143   AddedSafetyChecks = true;
3144
3145   // We currently don't use LoopVersioning for the actual loop cloning but we
3146   // still use it to add the noalias metadata.
3147   LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
3148                                            PSE.getSE());
3149   LVer->prepareNoAliasMetadata();
3150 }
3151
3152 void InnerLoopVectorizer::createEmptyLoop() {
3153   /*
3154    In this function we generate a new loop. The new loop will contain
3155    the vectorized instructions while the old loop will continue to run the
3156    scalar remainder.
3157
3158        [ ] <-- loop iteration number check.
3159     /   |
3160    /    v
3161   |    [ ]     <-- vector loop bypass (may consist of multiple blocks).
3162   |  /  |
3163   | /   v
3164   ||   [ ]     <-- vector pre header.
3165   |/    |
3166   |     v
3167   |    [ ] \
3168   |    [ ]_|   <-- vector loop.
3169   |     |
3170   |     v
3171   |   -[ ]   <--- middle-block.
3172   |  /  |
3173   | /   v
3174   -|- >[ ]     <--- new preheader.
3175    |    |
3176    |    v
3177    |   [ ] \
3178    |   [ ]_|   <-- old scalar loop to handle remainder.
3179     \   |
3180      \  v
3181       >[ ]     <-- exit block.
3182    ...
3183    */
3184
3185   BasicBlock *OldBasicBlock = OrigLoop->getHeader();
3186   BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
3187   BasicBlock *ExitBlock = OrigLoop->getExitBlock();
3188   assert(VectorPH && "Invalid loop structure");
3189   assert(ExitBlock && "Must have an exit block");
3190
3191   // Some loops have a single integer induction variable, while other loops
3192   // don't. One example is C++ iterators that often have multiple pointer
3193   // induction variables. In the code below we also support a case where we
3194   // don't have a single induction variable.
3195   //
3196   // We try hard to obtain an induction variable from the original loop.
3197   // However, if we don't find one that:
3198   //   - is an integer
3199   //   - counts from zero, stepping by one
3200   //   - is the size of the widest induction variable type
3201   // then we create a new one.
3202   OldInduction = Legal->getInduction();
3203   Type *IdxTy = Legal->getWidestInductionType();
3204
3205   // Split the single block loop into the two loop structure described above.
3206   BasicBlock *VecBody =
3207       VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
3208   BasicBlock *MiddleBlock =
3209       VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
3210   BasicBlock *ScalarPH =
3211       MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
3212
3213   // Create and register the new vector loop.
3214   Loop *Lp = new Loop();
3215   Loop *ParentLoop = OrigLoop->getParentLoop();
3216
3217   // Insert the new loop into the loop nest and register the new basic blocks
3218   // before calling any utilities such as SCEV that require valid LoopInfo.
3219   if (ParentLoop) {
3220     ParentLoop->addChildLoop(Lp);
3221     ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
3222     ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
3223   } else {
3224     LI->addTopLevelLoop(Lp);
3225   }
3226   Lp->addBasicBlockToLoop(VecBody, *LI);
3227
3228   // Find the loop boundaries.
3229   Value *Count = getOrCreateTripCount(Lp);
3230
3231   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3232
3233   // We need to test whether the backedge-taken count is uint##_max. Adding one
3234   // to it will cause overflow and an incorrect loop trip count in the vector
3235   // body. In case of overflow we want to directly jump to the scalar remainder
3236   // loop.
3237   emitMinimumIterationCountCheck(Lp, ScalarPH);
3238   // Now, compare the new count to zero. If it is zero, skip the vector loop
3239   // and jump to the scalar loop.
3240   emitVectorLoopEnteredCheck(Lp, ScalarPH);
3241   // Generate the code to check any assumptions that we've made for SCEV
3242   // expressions.
3243   emitSCEVChecks(Lp, ScalarPH);
3244
3245   // Generate the code that checks at runtime whether arrays overlap. We put
3246   // the checks into a separate block to make the more common case of few
3247   // elements faster.
3248   emitMemRuntimeChecks(Lp, ScalarPH);
3249
3250   // Generate the induction variable.
3251   // The loop step is equal to the vectorization factor (num of SIMD elements)
3252   // times the unroll factor (num of SIMD instructions).
3253   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3254   Constant *Step = ConstantInt::get(IdxTy, VF * UF);
3255   Induction =
3256       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3257                               getDebugLocFromInstOrOperands(OldInduction));
3258
3259   // We are going to resume the execution of the scalar loop.
3260   // Go over all of the induction variables that we found and fix the
3261   // PHIs that are left in the scalar version of the loop.
3262   // The starting values of PHI nodes depend on the counter of the last
3263   // iteration in the vectorized loop.
3264   // If we come from a bypass edge, then we need to start from the original
3265   // start value.
3266
3267   // This variable saves the new starting index for the scalar loop. It is used
3268   // to test if there are any tail iterations left once the vector loop has
3269   // completed.
3270   LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
3271   for (auto &InductionEntry : *List) {
3272     PHINode *OrigPhi = InductionEntry.first;
3273     InductionDescriptor II = InductionEntry.second;
3274
3275     // Create phi nodes to merge from the backedge-taken check block.
3276     PHINode *BCResumeVal = PHINode::Create(
3277         OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
3278     Value *EndValue;
3279     if (OrigPhi == OldInduction) {
3280       // We know what the end value is.
3281       EndValue = CountRoundDown;
3282     } else {
3283       IRBuilder<> B(LoopBypassBlocks.back()->getTerminator());
3284       Type *StepType = II.getStep()->getType();
3285       Instruction::CastOps CastOp =
3286           CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
3287       Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
3288       const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
3289       EndValue = II.transform(B, CRD, PSE.getSE(), DL);
3290       EndValue->setName("ind.end");
3291     }
3292
3293     // The new PHI merges the original incoming value, in case of a bypass,
3294     // or the value at the end of the vectorized loop.
3295     BCResumeVal->addIncoming(EndValue, MiddleBlock);
3296
3297     // Fix up external users of the induction variable.
3298     fixupIVUsers(OrigPhi, II, CountRoundDown, EndValue, MiddleBlock);
3299
3300     // Fix the scalar body counter (PHI node).
3301     unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
3302
3303     // When arriving via a bypass edge, the resume value is the original start
3304     // value of the induction variable.
3305     for (BasicBlock *BB : LoopBypassBlocks)
3306       BCResumeVal->addIncoming(II.getStartValue(), BB);
3307     OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
3308   }
3309
3310   // Add a check in the middle block to see if we have completed
3311   // all of the iterations in the first vector loop.
3312   // If (N - N%VF) == N, then we *don't* need to run the remainder.
3313   Value *CmpN =
3314       CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
3315                       CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
3316   ReplaceInstWithInst(MiddleBlock->getTerminator(),
3317                       BranchInst::Create(ExitBlock, ScalarPH, CmpN));
3318
3319   // Get ready to start creating new instructions into the vectorized body.
3320   Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());
3321
3322   // Save the state.
3323   LoopVectorPreHeader = Lp->getLoopPreheader();
3324   LoopScalarPreHeader = ScalarPH;
3325   LoopMiddleBlock = MiddleBlock;
3326   LoopExitBlock = ExitBlock;
3327   LoopVectorBody = VecBody;
3328   LoopScalarBody = OldBasicBlock;
3329
3330   // Keep all loop hints from the original loop on the vector loop (we'll
3331   // replace the vectorizer-specific hints below).
3332   if (MDNode *LID = OrigLoop->getLoopID())
3333     Lp->setLoopID(LID);
3334
3335   LoopVectorizeHints Hints(Lp, true, *ORE);
3336   Hints.setAlreadyVectorized();
3337 }
3338
3339 // Fix up external users of the induction variable. At this point, we are
3340 // in LCSSA form, with all external PHIs that use the IV having one input value,
3341 // coming from the remainder loop.
We need those PHIs to also have a correct
3342 // value for the IV when arriving directly from the middle block.
3343 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3344                                        const InductionDescriptor &II,
3345                                        Value *CountRoundDown, Value *EndValue,
3346                                        BasicBlock *MiddleBlock) {
3347   // There are two kinds of external IV usages: those that use the value
3348   // computed in the last iteration (the PHI) and those that use the penultimate
3349   // value (the value that feeds into the phi from the loop latch).
3350   // We allow both, but they obviously have different values.
3351
3352   assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3353
3354   DenseMap<Value *, Value *> MissingVals;
3355
3356   // An external user of the last iteration's value should see the value that
3357   // the remainder loop uses to initialize its own IV.
3358   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3359   for (User *U : PostInc->users()) {
3360     Instruction *UI = cast<Instruction>(U);
3361     if (!OrigLoop->contains(UI)) {
3362       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3363       MissingVals[UI] = EndValue;
3364     }
3365   }
3366
3367   // An external user of the penultimate value needs to see EndValue - Step.
3368   // The simplest way to get this is to recompute it from the constituent SCEVs,
3369   // that is Start + (Step * (CRD - 1)).
3370   for (User *U : OrigPhi->users()) {
3371     auto *UI = cast<Instruction>(U);
3372     if (!OrigLoop->contains(UI)) {
3373       const DataLayout &DL =
3374           OrigLoop->getHeader()->getModule()->getDataLayout();
3375       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3376
3377       IRBuilder<> B(MiddleBlock->getTerminator());
3378       Value *CountMinusOne = B.CreateSub(
3379           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3380       Value *CMO = B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType(),
3381                                        "cast.cmo");
3382       Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
3383       Escape->setName("ind.escape");
3384       MissingVals[UI] = Escape;
3385     }
3386   }
3387
3388   for (auto &I : MissingVals) {
3389     PHINode *PHI = cast<PHINode>(I.first);
3390     // One corner case we have to handle is two IVs "chasing" each other,
3391     // that is %IV2 = phi [...], [ %IV1, %latch ]
3392     // In this case, if IV1 has an external use, we need to avoid adding both
3393     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3394     // don't already have an incoming value for the middle block.
3395     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3396       PHI->addIncoming(I.second, MiddleBlock);
3397   }
3398 }
3399
3400 namespace {
3401 struct CSEDenseMapInfo {
3402   static bool canHandle(Instruction *I) {
3403     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3404            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3405   }
3406   static inline Instruction *getEmptyKey() {
3407     return DenseMapInfo<Instruction *>::getEmptyKey();
3408   }
3409   static inline Instruction *getTombstoneKey() {
3410     return DenseMapInfo<Instruction *>::getTombstoneKey();
3411   }
3412   static unsigned getHashValue(Instruction *I) {
3413     assert(canHandle(I) && "Unknown instruction!");
3414     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3415                                                            I->value_op_end()));
3416   }
3417   static bool isEqual(Instruction *LHS, Instruction *RHS) {
3418     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3419         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3420       return LHS == RHS;
3421     return LHS->isIdenticalTo(RHS);
3422   }
3423 };
3424 }
3425
3426 ///\brief Perform CSE of induction variable instructions.
3427 static void cse(BasicBlock *BB) {
3428   // Perform simple CSE.
3429   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3430   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
3431     Instruction *In = &*I++;
3432
3433     if (!CSEDenseMapInfo::canHandle(In))
3434       continue;
3435
3436     // Check if we can replace this instruction with any of the
3437     // visited instructions.
3438     if (Instruction *V = CSEMap.lookup(In)) {
3439       In->replaceAllUsesWith(V);
3440       In->eraseFromParent();
3441       continue;
3442     }
3443
3444     CSEMap[In] = In;
3445   }
3446 }
3447
3448 /// \brief Adds a 'fast' flag to floating point operations.
3449 static Value *addFastMathFlag(Value *V) {
3450   if (isa<FPMathOperator>(V)) {
3451     FastMathFlags Flags;
3452     Flags.setUnsafeAlgebra();
3453     cast<Instruction>(V)->setFastMathFlags(Flags);
3454   }
3455   return V;
3456 }
3457
3458 /// Estimate the overhead of scalarizing a value. Insert and Extract are set if
3459 /// the result needs to be inserted and/or extracted from vectors.
3460 static unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract,
3461                                          const TargetTransformInfo &TTI) {
3462   if (Ty->isVoidTy())
3463     return 0;
3464
3465   assert(Ty->isVectorTy() && "Can only scalarize vectors");
3466   unsigned Cost = 0;
3467
3468   for (unsigned I = 0, E = Ty->getVectorNumElements(); I < E; ++I) {
3469     if (Insert)
3470       Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, I);
3471     if (Extract)
3472       Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, I);
3473   }
3474
3475   return Cost;
3476 }
3477
3478 // Estimate cost of a call instruction CI if it were vectorized with factor VF.
3479 // Return the cost of the instruction, including scalarization overhead if it's
3480 // needed. The flag NeedToScalarize shows if the call needs to be scalarized,
3481 // i.e. either a vector version isn't available or it is too expensive.
3482 static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
3483                                   const TargetTransformInfo &TTI,
3484                                   const TargetLibraryInfo *TLI,
3485                                   bool &NeedToScalarize) {
3486   Function *F = CI->getCalledFunction();
3487   StringRef FnName = CI->getCalledFunction()->getName();
3488   Type *ScalarRetTy = CI->getType();
3489   SmallVector<Type *, 4> Tys, ScalarTys;
3490   for (auto &ArgOp : CI->arg_operands())
3491     ScalarTys.push_back(ArgOp->getType());
3492
3493   // Estimate cost of scalarized vector call.
The source operands are assumed 3494 // to be vectors, so we need to extract individual elements from there, 3495 // execute VF scalar calls, and then gather the result into the vector return 3496 // value. 3497 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3498 if (VF == 1) 3499 return ScalarCallCost; 3500 3501 // Compute corresponding vector type for return value and arguments. 3502 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3503 for (Type *ScalarTy : ScalarTys) 3504 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3505 3506 // Compute costs of unpacking argument values for the scalar calls and 3507 // packing the return values to a vector. 3508 unsigned ScalarizationCost = 3509 getScalarizationOverhead(RetTy, true, false, TTI); 3510 for (Type *Ty : Tys) 3511 ScalarizationCost += getScalarizationOverhead(Ty, false, true, TTI); 3512 3513 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3514 3515 // If we can't emit a vector call for this function, then the currently found 3516 // cost is the cost we need to return. 3517 NeedToScalarize = true; 3518 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3519 return Cost; 3520 3521 // If the corresponding vector cost is cheaper, return its cost. 3522 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3523 if (VectorCallCost < Cost) { 3524 NeedToScalarize = false; 3525 return VectorCallCost; 3526 } 3527 return Cost; 3528 } 3529 3530 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3531 // factor VF. Return the cost of the instruction, including scalarization 3532 // overhead if it's needed. 3533 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3534 const TargetTransformInfo &TTI, 3535 const TargetLibraryInfo *TLI) { 3536 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3537 assert(ID && "Expected intrinsic call!"); 3538 3539 Type *RetTy = ToVectorTy(CI->getType(), VF); 3540 SmallVector<Type *, 4> Tys; 3541 for (Value *ArgOperand : CI->arg_operands()) 3542 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 3543 3544 FastMathFlags FMF; 3545 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3546 FMF = FPMO->getFastMathFlags(); 3547 3548 return TTI.getIntrinsicInstrCost(ID, RetTy, Tys, FMF); 3549 } 3550 3551 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3552 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3553 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3554 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3555 } 3556 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3557 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3558 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3559 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3560 } 3561 3562 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3563 // For every instruction `I` in MinBWs, truncate the operands, create a 3564 // truncated version of `I` and reextend its result. InstCombine runs 3565 // later and will remove any ext/trunc pairs. 
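  // A sketch of the rewrite (illustrative, not from the original source):
  // if MinBWs records that an i32 add only needs 8 bits,
  //   %a = add <4 x i32> %x, %y
  // becomes
  //   %xt = trunc <4 x i32> %x to <4 x i8>
  //   %yt = trunc <4 x i32> %y to <4 x i8>
  //   %at = add <4 x i8> %xt, %yt
  //   %a.ext = zext <4 x i8> %at to <4 x i32>
  // and InstCombine later removes any redundant ext/trunc pairs.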
3566 // 3567 SmallPtrSet<Value *, 4> Erased; 3568 for (const auto &KV : *MinBWs) { 3569 VectorParts &Parts = WidenMap.get(KV.first); 3570 for (Value *&I : Parts) { 3571 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3572 continue; 3573 Type *OriginalTy = I->getType(); 3574 Type *ScalarTruncatedTy = 3575 IntegerType::get(OriginalTy->getContext(), KV.second); 3576 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3577 OriginalTy->getVectorNumElements()); 3578 if (TruncatedTy == OriginalTy) 3579 continue; 3580 3581 IRBuilder<> B(cast<Instruction>(I)); 3582 auto ShrinkOperand = [&](Value *V) -> Value * { 3583 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3584 if (ZI->getSrcTy() == TruncatedTy) 3585 return ZI->getOperand(0); 3586 return B.CreateZExtOrTrunc(V, TruncatedTy); 3587 }; 3588 3589 // The actual instruction modification depends on the instruction type, 3590 // unfortunately. 3591 Value *NewI = nullptr; 3592 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3593 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3594 ShrinkOperand(BO->getOperand(1))); 3595 cast<BinaryOperator>(NewI)->copyIRFlags(I); 3596 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3597 NewI = 3598 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3599 ShrinkOperand(CI->getOperand(1))); 3600 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3601 NewI = B.CreateSelect(SI->getCondition(), 3602 ShrinkOperand(SI->getTrueValue()), 3603 ShrinkOperand(SI->getFalseValue())); 3604 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3605 switch (CI->getOpcode()) { 3606 default: 3607 llvm_unreachable("Unhandled cast!"); 3608 case Instruction::Trunc: 3609 NewI = ShrinkOperand(CI->getOperand(0)); 3610 break; 3611 case Instruction::SExt: 3612 NewI = B.CreateSExtOrTrunc( 3613 CI->getOperand(0), 3614 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3615 break; 3616 case Instruction::ZExt: 3617 NewI = B.CreateZExtOrTrunc( 3618 CI->getOperand(0), 3619 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3620 break; 3621 } 3622 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3623 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements(); 3624 auto *O0 = B.CreateZExtOrTrunc( 3625 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3626 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements(); 3627 auto *O1 = B.CreateZExtOrTrunc( 3628 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3629 3630 NewI = B.CreateShuffleVector(O0, O1, SI->getMask()); 3631 } else if (isa<LoadInst>(I)) { 3632 // Don't do anything with the operands, just extend the result. 3633 continue; 3634 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3635 auto Elements = IE->getOperand(0)->getType()->getVectorNumElements(); 3636 auto *O0 = B.CreateZExtOrTrunc( 3637 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3638 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3639 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3640 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3641 auto Elements = EE->getOperand(0)->getType()->getVectorNumElements(); 3642 auto *O0 = B.CreateZExtOrTrunc( 3643 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3644 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3645 } else { 3646 llvm_unreachable("Unhandled instruction type!"); 3647 } 3648 3649 // Lastly, extend the result. 
3650       NewI->takeName(cast<Instruction>(I));
3651       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3652       I->replaceAllUsesWith(Res);
3653       cast<Instruction>(I)->eraseFromParent();
3654       Erased.insert(I);
3655       I = Res;
3656     }
3657   }
3658
3659   // We'll have created a bunch of ZExts that are now parentless. Clean up.
3660   for (const auto &KV : *MinBWs) {
3661     VectorParts &Parts = WidenMap.get(KV.first);
3662     for (Value *&I : Parts) {
3663       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3664       if (Inst && Inst->use_empty()) {
3665         Value *NewI = Inst->getOperand(0);
3666         Inst->eraseFromParent();
3667         I = NewI;
3668       }
3669     }
3670   }
3671 }
3672
3673 void InnerLoopVectorizer::vectorizeLoop() {
3674   //===------------------------------------------------===//
3675   //
3676   // Notice: any optimization or new instruction that goes
3677   // into the code below should also be implemented in
3678   // the cost model.
3679   //
3680   //===------------------------------------------------===//
3681   Constant *Zero = Builder.getInt32(0);
3682
3683   // In order to support recurrences we need to be able to vectorize Phi nodes.
3684   // Phi nodes have cycles, so we need to vectorize them in two stages. First,
3685   // we create a new vector PHI node with no incoming edges. We use this value
3686   // when we vectorize all of the instructions that use the PHI. Next, after
3687   // all of the instructions in the block are complete we add the new incoming
3688   // edges to the PHI. At this point all of the instructions in the basic block
3689   // are vectorized, so we can use them to construct the PHI.
3690   PhiVector PHIsToFix;
3691
3692   // Scan the loop in a topological order to ensure that defs are vectorized
3693   // before users.
3694   LoopBlocksDFS DFS(OrigLoop);
3695   DFS.perform(LI);
3696
3697   // Vectorize all of the blocks in the original loop.
3698   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
3699     vectorizeBlockInLoop(BB, &PHIsToFix);
3700
3701   // Insert truncates and extends for any truncated instructions as hints to
3702   // InstCombine.
3703   if (VF > 1)
3704     truncateToMinimalBitwidths();
3705
3706   // At this point every instruction in the original loop is widened to a
3707   // vector form. Now we need to fix the recurrences in PHIsToFix. These PHI
3708   // nodes are currently empty because we did not want to introduce cycles.
3709   // This is the second stage of vectorizing recurrences.
3710   for (PHINode *Phi : PHIsToFix) {
3711     assert(Phi && "Unable to recover vectorized PHI");
3712
3713     // Handle first-order recurrences that need to be fixed.
3714     if (Legal->isFirstOrderRecurrence(Phi)) {
3715       fixFirstOrderRecurrence(Phi);
3716       continue;
3717     }
3718
3719     // If the phi node is not a first-order recurrence, it must be a reduction.
3720     // Get its reduction variable descriptor.
3721     assert(Legal->isReductionVariable(Phi) &&
3722            "Unable to find the reduction variable");
3723     RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
3724
3725     RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3726     TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3727     Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3728     RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3729         RdxDesc.getMinMaxRecurrenceKind();
3730     setDebugLocFromInst(Builder, ReductionStartValue);
3731
3732     // We need to generate a reduction vector from the incoming scalar.
3733     // To do so, we need to generate the 'identity' vector and override
3734     // one of the elements with the incoming scalar reduction. We need
3735     // to do it in the vector-loop preheader.
3736     Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());
3737
3738     // This is the vector-clone of the value that leaves the loop.
3739     VectorParts &VectorExit = getVectorValue(LoopExitInst);
3740     Type *VecTy = VectorExit[0]->getType();
3741
3742     // Find the reduction identity variable: zero for addition, or, and xor;
3743     // one for multiplication; -1 for and.
3744     Value *Identity;
3745     Value *VectorStart;
3746     if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3747         RK == RecurrenceDescriptor::RK_FloatMinMax) {
3748       // MinMax reductions have the start value as their identity.
3749       if (VF == 1) {
3750         VectorStart = Identity = ReductionStartValue;
3751       } else {
3752         VectorStart = Identity =
3753             Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3754       }
3755     } else {
3756       // Handle other reduction kinds:
3757       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3758           RK, VecTy->getScalarType());
3759       if (VF == 1) {
3760         Identity = Iden;
3761         // This vector is the Identity vector where the first element is the
3762         // incoming scalar reduction.
3763         VectorStart = ReductionStartValue;
3764       } else {
3765         Identity = ConstantVector::getSplat(VF, Iden);
3766
3767         // This vector is the Identity vector where the first element is the
3768         // incoming scalar reduction.
3769         VectorStart =
3770             Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3771       }
3772     }
3773
3774     // Fix the vector-loop phi.
3775
3776     // Reductions do not have to start at zero. They can start with
3777     // any loop-invariant value.
3778     VectorParts &VecRdxPhi = WidenMap.get(Phi);
3779     BasicBlock *Latch = OrigLoop->getLoopLatch();
3780     Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3781     VectorParts &Val = getVectorValue(LoopVal);
3782     for (unsigned part = 0; part < UF; ++part) {
3783       // Make sure to add the reduction start value only to the
3784       // first unroll part.
3785       Value *StartVal = (part == 0) ? VectorStart : Identity;
3786       cast<PHINode>(VecRdxPhi[part])
3787           ->addIncoming(StartVal, LoopVectorPreHeader);
3788       cast<PHINode>(VecRdxPhi[part])
3789           ->addIncoming(Val[part], LoopVectorBody);
3790     }
3791
3792     // Before each round, move the insertion point right between
3793     // the PHIs and the values we are going to write.
3794     // This allows us to write both PHINodes and the extractelement
3795     // instructions.
3796     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
3797
3798     VectorParts RdxParts = getVectorValue(LoopExitInst);
3799     setDebugLocFromInst(Builder, LoopExitInst);
3800
3801     // If the vector reduction can be performed in a smaller type, we truncate
3802     // then extend the loop exit value to enable InstCombine to evaluate the
3803     // entire expression in the smaller type.
3804     if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
3805       Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
3806       Builder.SetInsertPoint(LoopVectorBody->getTerminator());
3807       for (unsigned part = 0; part < UF; ++part) {
3808         Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
3809         Value *Extnd = RdxDesc.isSigned() ?
Builder.CreateSExt(Trunc, VecTy) 3810 : Builder.CreateZExt(Trunc, VecTy); 3811 for (Value::user_iterator UI = RdxParts[part]->user_begin(); 3812 UI != RdxParts[part]->user_end();) 3813 if (*UI != Trunc) { 3814 (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd); 3815 RdxParts[part] = Extnd; 3816 } else { 3817 ++UI; 3818 } 3819 } 3820 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3821 for (unsigned part = 0; part < UF; ++part) 3822 RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy); 3823 } 3824 3825 // Reduce all of the unrolled parts into a single vector. 3826 Value *ReducedPartRdx = RdxParts[0]; 3827 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK); 3828 setDebugLocFromInst(Builder, ReducedPartRdx); 3829 for (unsigned part = 1; part < UF; ++part) { 3830 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3831 // Floating point operations had to be 'fast' to enable the reduction. 3832 ReducedPartRdx = addFastMathFlag( 3833 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part], 3834 ReducedPartRdx, "bin.rdx")); 3835 else 3836 ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp( 3837 Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]); 3838 } 3839 3840 if (VF > 1) { 3841 // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles 3842 // and vector ops, reducing the set of values being computed by half each 3843 // round. 3844 assert(isPowerOf2_32(VF) && 3845 "Reduction emission only supported for pow2 vectors!"); 3846 Value *TmpVec = ReducedPartRdx; 3847 SmallVector<Constant *, 32> ShuffleMask(VF, nullptr); 3848 for (unsigned i = VF; i != 1; i >>= 1) { 3849 // Move the upper half of the vector to the lower half. 3850 for (unsigned j = 0; j != i / 2; ++j) 3851 ShuffleMask[j] = Builder.getInt32(i / 2 + j); 3852 3853 // Fill the rest of the mask with undef. 3854 std::fill(&ShuffleMask[i / 2], ShuffleMask.end(), 3855 UndefValue::get(Builder.getInt32Ty())); 3856 3857 Value *Shuf = Builder.CreateShuffleVector( 3858 TmpVec, UndefValue::get(TmpVec->getType()), 3859 ConstantVector::get(ShuffleMask), "rdx.shuf"); 3860 3861 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3862 // Floating point operations had to be 'fast' to enable the reduction. 3863 TmpVec = addFastMathFlag(Builder.CreateBinOp( 3864 (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx")); 3865 else 3866 TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind, 3867 TmpVec, Shuf); 3868 } 3869 3870 // The result is in the first element of the vector. 3871 ReducedPartRdx = 3872 Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 3873 3874 // If the reduction can be performed in a smaller type, we need to extend 3875 // the reduction to the wider type before we branch to the original loop. 3876 if (Phi->getType() != RdxDesc.getRecurrenceType()) 3877 ReducedPartRdx = 3878 RdxDesc.isSigned() 3879 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 3880 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 3881 } 3882 3883 // Create a phi node that merges control-flow from the backedge-taken check 3884 // block and the middle block. 
3885     PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
3886                                           LoopScalarPreHeader->getTerminator());
3887     for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
3888       BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
3889     BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
3890
3891     // Now, we need to fix the users of the reduction variable
3892     // inside and outside of the scalar remainder loop.
3893     // We know that the loop is in LCSSA form. We need to update the
3894     // PHI nodes in the exit blocks.
3895     for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
3896                               LEE = LoopExitBlock->end();
3897          LEI != LEE; ++LEI) {
3898       PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
3899       if (!LCSSAPhi)
3900         break;
3901
3902       // All PHINodes need to have a single entry edge, or two if
3903       // we already fixed them.
3904       assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
3905
3906       // We found our reduction value exit-PHI. Update it with the
3907       // incoming bypass edge.
3908       if (LCSSAPhi->getIncomingValue(0) == LoopExitInst) {
3909         // Add an edge coming from the bypass.
3910         LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
3911         break;
3912       }
3913     } // end of the LCSSA phi scan.
3914
3915     // Fix the scalar loop reduction variable with the incoming reduction sum
3916     // from the vector body and from the backedge value.
3917     int IncomingEdgeBlockIdx =
3918         Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
3919     assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
3920     // Pick the other block.
3921     int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
3922     Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
3923     Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
3924   } // end of for each Phi in PHIsToFix.
3925
3926   fixLCSSAPHIs();
3927
3928   // Make sure DomTree is updated.
3929   updateAnalysis();
3930
3931   predicateStores();
3932
3933   // Remove redundant induction instructions.
3934   cse(LoopVectorBody);
3935 }
3936
3937 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
3938
3939   // This is the second phase of vectorizing first-order recurrences. An
3940   // overview of the transformation is described below. Suppose we have the
3941   // following loop.
3942   //
3943   //   for (int i = 0; i < n; ++i)
3944   //     b[i] = a[i] - a[i - 1];
3945   //
3946   // There is a first-order recurrence on "a". For this loop, the shorthand
3947   // scalar IR looks like:
3948   //
3949   //   scalar.ph:
3950   //     s_init = a[-1]
3951   //     br scalar.body
3952   //
3953   //   scalar.body:
3954   //     i = phi [0, scalar.ph], [i+1, scalar.body]
3955   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
3956   //     s2 = a[i]
3957   //     b[i] = s2 - s1
3958   //     br cond, scalar.body, ...
3959   //
3960   // In this example, s1 is a recurrence because its value depends on the
3961   // previous iteration. In the first phase of vectorization, we created a
3962   // temporary value for s1. We now complete the vectorization and produce the
3963   // shorthand vector IR shown below (for VF = 4, UF = 1).
3964   //
3965   //   vector.ph:
3966   //     v_init = vector(..., ..., ..., a[-1])
3967   //     br vector.body
3968   //
3969   //   vector.body
3970   //     i = phi [0, vector.ph], [i+4, vector.body]
3971   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
3972   //     v2 = a[i, i+1, i+2, i+3];
3973   //     v3 = vector(v1(3), v2(0, 1, 2))
3974   //     b[i, i+1, i+2, i+3] = v2 - v3
3975   //     br cond, vector.body, middle.block
3976   //
3977   //   middle.block:
3978   //     x = v2(3)
3979   //     br scalar.ph
3980   //
3981   //   scalar.ph:
3982   //     s_init = phi [x, middle.block], [a[-1], otherwise]
3983   //     br scalar.body
3984   //
3985   // After the vector loop completes execution, we extract the next value of
3986   // the recurrence (x) to use as the initial value in the scalar loop.
3987
3988   // Get the original loop preheader and single loop latch.
3989   auto *Preheader = OrigLoop->getLoopPreheader();
3990   auto *Latch = OrigLoop->getLoopLatch();
3991
3992   // Get the initial and previous values of the scalar recurrence.
3993   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
3994   auto *Previous = Phi->getIncomingValueForBlock(Latch);
3995
3996   // Create a vector from the initial value.
3997   auto *VectorInit = ScalarInit;
3998   if (VF > 1) {
3999     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4000     VectorInit = Builder.CreateInsertElement(
4001         UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
4002         Builder.getInt32(VF - 1), "vector.recur.init");
4003   }
4004
4005   // We constructed a temporary phi node in the first phase of vectorization.
4006   // This phi node will eventually be deleted.
4007   auto &PhiParts = getVectorValue(Phi);
4008   Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));
4009
4010   // Create a phi node for the new recurrence. The current value will either be
4011   // the initial value inserted into a vector or a loop-varying vector value.
4012   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4013   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4014
4015   // Get the vectorized previous value. We ensured the previous value was an
4016   // instruction when detecting the recurrence.
4017   auto &PreviousParts = getVectorValue(Previous);
4018
4019   // Set the insertion point to be after this instruction. We ensured the
4020   // previous value dominated all uses of the phi when detecting the
4021   // recurrence.
4022   Builder.SetInsertPoint(
4023       &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1])));
4024
4025   // We will construct a vector for the recurrence by combining the values for
4026   // the current and previous iterations. This is the required shuffle mask.
4027   SmallVector<Constant *, 8> ShuffleMask(VF);
4028   ShuffleMask[0] = Builder.getInt32(VF - 1);
4029   for (unsigned I = 1; I < VF; ++I)
4030     ShuffleMask[I] = Builder.getInt32(I + VF - 1);
4031
4032   // The vector from which to take the initial value for the current iteration
4033   // (actual or unrolled). Initially, this is the vector phi node.
4034   Value *Incoming = VecPhi;
4035
4036   // Shuffle the current and previous vector and update the vector parts.
4037   for (unsigned Part = 0; Part < UF; ++Part) {
4038     auto *Shuffle =
4039         VF > 1
4040             ?
Builder.CreateShuffleVector(Incoming, PreviousParts[Part], 4041 ConstantVector::get(ShuffleMask)) 4042 : Incoming; 4043 PhiParts[Part]->replaceAllUsesWith(Shuffle); 4044 cast<Instruction>(PhiParts[Part])->eraseFromParent(); 4045 PhiParts[Part] = Shuffle; 4046 Incoming = PreviousParts[Part]; 4047 } 4048 4049 // Fix the latch value of the new recurrence in the vector loop. 4050 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4051 4052 // Extract the last vector element in the middle block. This will be the 4053 // initial value for the recurrence when jumping to the scalar loop. 4054 auto *Extract = Incoming; 4055 if (VF > 1) { 4056 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4057 Extract = Builder.CreateExtractElement(Extract, Builder.getInt32(VF - 1), 4058 "vector.recur.extract"); 4059 } 4060 4061 // Fix the initial value of the original recurrence in the scalar loop. 4062 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 4063 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 4064 for (auto *BB : predecessors(LoopScalarPreHeader)) { 4065 auto *Incoming = BB == LoopMiddleBlock ? Extract : ScalarInit; 4066 Start->addIncoming(Incoming, BB); 4067 } 4068 4069 Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start); 4070 Phi->setName("scalar.recur"); 4071 4072 // Finally, fix users of the recurrence outside the loop. The users will need 4073 // either the last value of the scalar recurrence or the last value of the 4074 // vector recurrence we extracted in the middle block. Since the loop is in 4075 // LCSSA form, we just need to find the phi node for the original scalar 4076 // recurrence in the exit block, and then add an edge for the middle block. 4077 for (auto &I : *LoopExitBlock) { 4078 auto *LCSSAPhi = dyn_cast<PHINode>(&I); 4079 if (!LCSSAPhi) 4080 break; 4081 if (LCSSAPhi->getIncomingValue(0) == Phi) { 4082 LCSSAPhi->addIncoming(Extract, LoopMiddleBlock); 4083 break; 4084 } 4085 } 4086 } 4087 4088 void InnerLoopVectorizer::fixLCSSAPHIs() { 4089 for (Instruction &LEI : *LoopExitBlock) { 4090 auto *LCSSAPhi = dyn_cast<PHINode>(&LEI); 4091 if (!LCSSAPhi) 4092 break; 4093 if (LCSSAPhi->getNumIncomingValues() == 1) 4094 LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()), 4095 LoopMiddleBlock); 4096 } 4097 } 4098 4099 void InnerLoopVectorizer::predicateStores() { 4100 for (auto KV : PredicatedStores) { 4101 BasicBlock::iterator I(KV.first); 4102 auto *BB = SplitBlock(I->getParent(), &*std::next(I), DT, LI); 4103 auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false, 4104 /*BranchWeights=*/nullptr, DT, LI); 4105 I->moveBefore(T); 4106 I->getParent()->setName("pred.store.if"); 4107 BB->setName("pred.store.continue"); 4108 } 4109 DEBUG(DT->verifyDomTree()); 4110 } 4111 4112 InnerLoopVectorizer::VectorParts 4113 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) { 4114 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 4115 4116 // Look for cached value. 4117 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 4118 EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge); 4119 if (ECEntryIt != MaskCache.end()) 4120 return ECEntryIt->second; 4121 4122 VectorParts SrcMask = createBlockInMask(Src); 4123 4124 // The terminator has to be a branch inst! 
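  // Illustrative example (not from the original source): for an if-converted
  // diamond "if (c) { A; } else { B; }" with header H, the mask for edge
  // (H, A) is BlockMask(H) & C and the mask for edge (H, B) is
  // BlockMask(H) & ~C, where C is the vectorized value of the branch
  // condition handled below.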
4125   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
4126   assert(BI && "Unexpected terminator found");
4127
4128   if (BI->isConditional()) {
4129     VectorParts EdgeMask = getVectorValue(BI->getCondition());
4130
4131     if (BI->getSuccessor(0) != Dst)
4132       for (unsigned part = 0; part < UF; ++part)
4133         EdgeMask[part] = Builder.CreateNot(EdgeMask[part]);
4134
4135     for (unsigned part = 0; part < UF; ++part)
4136       EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]);
4137
4138     MaskCache[Edge] = EdgeMask;
4139     return EdgeMask;
4140   }
4141
4142   MaskCache[Edge] = SrcMask;
4143   return SrcMask;
4144 }
4145
4146 InnerLoopVectorizer::VectorParts
4147 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
4148   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
4149
4150   // The loop's incoming mask is all ones.
4151   if (OrigLoop->getHeader() == BB) {
4152     Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1);
4153     return getVectorValue(C);
4154   }
4155
4156   // This is the block mask: the OR of all incoming edge masks, seeded with zero.
4157   Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0);
4158   VectorParts BlockMask = getVectorValue(Zero);
4159
4160   // For each pred:
4161   for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) {
4162     VectorParts EM = createEdgeMask(*it, BB);
4163     for (unsigned part = 0; part < UF; ++part)
4164       BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]);
4165   }
4166
4167   return BlockMask;
4168 }
4169
4170 void InnerLoopVectorizer::widenPHIInstruction(
4171     Instruction *PN, InnerLoopVectorizer::VectorParts &Entry, unsigned UF,
4172     unsigned VF, PhiVector *PV) {
4173   PHINode *P = cast<PHINode>(PN);
4174   // Handle recurrences.
4175   if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4176     for (unsigned part = 0; part < UF; ++part) {
4177       // This is phase one of vectorizing PHIs.
4178       Type *VecTy =
4179           (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4180       Entry[part] = PHINode::Create(
4181           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4182     }
4183     PV->push_back(P);
4184     return;
4185   }
4186
4187   setDebugLocFromInst(Builder, P);
4188   // Check for PHI nodes that are lowered to vector selects.
4189   if (P->getParent() != OrigLoop->getHeader()) {
4190     // We know that all PHIs in non-header blocks are converted into
4191     // selects, so we don't have to worry about the insertion order and we
4192     // can just use the builder.
4193     // At this point we generate the predication tree. There may be
4194     // duplications since this is a simple recursive scan, but future
4195     // optimizations will clean it up.
4196
4197     unsigned NumIncoming = P->getNumIncomingValues();
4198
4199     // Generate a sequence of selects of the form:
4200     // SELECT(Mask3, In3,
4201     //        SELECT(Mask2, In2,
4202     //               ( ...)))
4203     for (unsigned In = 0; In < NumIncoming; In++) {
4204       VectorParts Cond =
4205           createEdgeMask(P->getIncomingBlock(In), P->getParent());
4206       VectorParts &In0 = getVectorValue(P->getIncomingValue(In));
4207
4208       for (unsigned part = 0; part < UF; ++part) {
4209         // We might have single-edge PHIs (blocks); use an identity
4210         // 'select' for the first PHI operand.
4211         if (In == 0)
4212           Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]);
4213         else
4214           // Select between the current value and the previous incoming edge
4215           // based on the incoming mask.
4216 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], Entry[part], 4217 "predphi"); 4218 } 4219 } 4220 return; 4221 } 4222 4223 // This PHINode must be an induction variable. 4224 // Make sure that we know about it. 4225 assert(Legal->getInductionVars()->count(P) && "Not an induction variable"); 4226 4227 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 4228 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4229 4230 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4231 // which can be found from the original scalar operations. 4232 switch (II.getKind()) { 4233 case InductionDescriptor::IK_NoInduction: 4234 llvm_unreachable("Unknown induction"); 4235 case InductionDescriptor::IK_IntInduction: 4236 return widenIntInduction(P, Entry); 4237 case InductionDescriptor::IK_PtrInduction: { 4238 // Handle the pointer induction variable case. 4239 assert(P->getType()->isPointerTy() && "Unexpected type."); 4240 // This is the normalized GEP that starts counting at zero. 4241 Value *PtrInd = Induction; 4242 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType()); 4243 // This is the vector of results. Notice that we don't generate 4244 // vector geps because scalar geps result in better code. 4245 for (unsigned part = 0; part < UF; ++part) { 4246 if (VF == 1) { 4247 int EltIndex = part; 4248 Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex); 4249 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4250 Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL); 4251 SclrGep->setName("next.gep"); 4252 Entry[part] = SclrGep; 4253 continue; 4254 } 4255 4256 Value *VecVal = UndefValue::get(VectorType::get(P->getType(), VF)); 4257 for (unsigned int i = 0; i < VF; ++i) { 4258 int EltIndex = i + part * VF; 4259 Constant *Idx = ConstantInt::get(PtrInd->getType(), EltIndex); 4260 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4261 Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL); 4262 SclrGep->setName("next.gep"); 4263 VecVal = Builder.CreateInsertElement(VecVal, SclrGep, 4264 Builder.getInt32(i), "insert.gep"); 4265 } 4266 Entry[part] = VecVal; 4267 } 4268 return; 4269 } 4270 case InductionDescriptor::IK_FpInduction: { 4271 assert(P->getType() == II.getStartValue()->getType() && 4272 "Types must match"); 4273 // Handle other induction variables that are now based on the 4274 // canonical one. 4275 assert(P != OldInduction && "Primary induction can be integer only"); 4276 4277 Value *V = Builder.CreateCast(Instruction::SIToFP, Induction, P->getType()); 4278 V = II.transform(Builder, V, PSE.getSE(), DL); 4279 V->setName("fp.offset.idx"); 4280 4281 // Now we have scalar op: %fp.offset.idx = StartVal +/- Induction*StepVal 4282 4283 Value *Broadcasted = getBroadcastInstrs(V); 4284 // After broadcasting the induction variable we need to make the vector 4285 // consecutive by adding StepVal*0, StepVal*1, StepVal*2, etc. 4286 Value *StepVal = cast<SCEVUnknown>(II.getStep())->getValue(); 4287 for (unsigned part = 0; part < UF; ++part) 4288 Entry[part] = getStepVector(Broadcasted, VF * part, StepVal, 4289 II.getInductionOpcode()); 4290 return; 4291 } 4292 } 4293 } 4294 4295 void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) { 4296 // For each instruction in the old loop. 
  for (Instruction &I : *BB) {
    VectorParts &Entry = WidenMap.get(&I);

    switch (I.getOpcode()) {
    case Instruction::Br:
      // Nothing to do for PHIs and BR, since we already took care of the
      // loop control flow instructions.
      continue;
    case Instruction::PHI: {
      // Vectorize PHINodes.
      widenPHIInstruction(&I, Entry, UF, VF, PV);
      continue;
    } // End of PHI.

    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      // Just widen binops.
      auto *BinOp = cast<BinaryOperator>(&I);
      setDebugLocFromInst(Builder, BinOp);
      VectorParts &A = getVectorValue(BinOp->getOperand(0));
      VectorParts &B = getVectorValue(BinOp->getOperand(1));

      // Use this vector value for all users of the original instruction.
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]);

        if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
          VecOp->copyIRFlags(BinOp);

        Entry[Part] = V;
      }

      addMetadata(Entry, BinOp);
      break;
    }
    case Instruction::Select: {
      // Widen selects.
      // If the selector is loop invariant, we can create a select
      // instruction with a scalar condition. Otherwise, use a vector select.
      auto *SE = PSE.getSE();
      bool InvariantCond =
          SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
      setDebugLocFromInst(Builder, &I);

      // The condition can be loop invariant but still defined inside the
      // loop. This means that we can't just use the original 'cond' value.
      // We have to take the 'vectorized' value and pick the first lane.
      // Instcombine will make this a no-op.
      VectorParts &Cond = getVectorValue(I.getOperand(0));
      VectorParts &Op0 = getVectorValue(I.getOperand(1));
      VectorParts &Op1 = getVectorValue(I.getOperand(2));

      Value *ScalarCond =
          (VF == 1)
              ? Cond[0]
              : Builder.CreateExtractElement(Cond[0], Builder.getInt32(0));

      for (unsigned Part = 0; Part < UF; ++Part) {
        Entry[Part] = Builder.CreateSelect(
            InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]);
      }

      addMetadata(Entry, &I);
      break;
    }

    case Instruction::ICmp:
    case Instruction::FCmp: {
      // Widen compares. Generate vector compares.
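      // Illustrative example (not from the original source): with VF = 4,
      // the scalar compare
      //   %c = icmp slt i32 %a, %b
      // becomes a wide compare whose result is a mask of lane predicates:
      //   %c = icmp slt <4 x i32> %a, %b   ; yields <4 x i1>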
      bool FCmp = (I.getOpcode() == Instruction::FCmp);
      auto *Cmp = dyn_cast<CmpInst>(&I);
      setDebugLocFromInst(Builder, Cmp);
      VectorParts &A = getVectorValue(Cmp->getOperand(0));
      VectorParts &B = getVectorValue(Cmp->getOperand(1));
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *C = nullptr;
        if (FCmp) {
          C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
          cast<FCmpInst>(C)->copyFastMathFlags(Cmp);
        } else {
          C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]);
        }
        Entry[Part] = C;
      }

      addMetadata(Entry, &I);
      break;
    }

    case Instruction::Store:
    case Instruction::Load:
      vectorizeMemoryInstruction(&I);
      break;
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      auto *CI = dyn_cast<CastInst>(&I);
      setDebugLocFromInst(Builder, CI);

      // Optimize the special case where the source is a constant integer
      // induction variable. Notice that we can only optimize the 'trunc' case
      // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
      // (c) other casts depend on pointer size.
      auto ID = Legal->getInductionVars()->lookup(OldInduction);
      if (isa<TruncInst>(CI) && CI->getOperand(0) == OldInduction &&
          ID.getConstIntStepValue()) {
        widenIntInduction(OldInduction, Entry, cast<TruncInst>(CI));
        addMetadata(Entry, &I);
        break;
      }

      // Vectorize casts.
      Type *DestTy =
          (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);

      VectorParts &A = getVectorValue(CI->getOperand(0));
      for (unsigned Part = 0; Part < UF; ++Part)
        Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy);
      addMetadata(Entry, &I);
      break;
    }

    case Instruction::Call: {
      // Ignore dbg intrinsics.
      if (isa<DbgInfoIntrinsic>(I))
        break;
      setDebugLocFromInst(Builder, &I);

      Module *M = BB->getParent()->getParent();
      auto *CI = cast<CallInst>(&I);

      StringRef FnName = CI->getCalledFunction()->getName();
      Function *F = CI->getCalledFunction();
      Type *RetTy = ToVectorTy(CI->getType(), VF);
      SmallVector<Type *, 4> Tys;
      for (Value *ArgOperand : CI->arg_operands())
        Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));

      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
                 ID == Intrinsic::lifetime_start)) {
        scalarizeInstruction(&I);
        break;
      }
      // This flag shows whether we use an intrinsic or a usual call for the
      // vectorized version of the instruction.
      // Is it beneficial to perform the intrinsic call compared to the lib
      // call?
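      // For example (illustrative; actual names depend on the target
      // library): a call to 'sinf' may be widened to the 'llvm.sin'
      // intrinsic, to a vector math-library routine provided by TLI, or be
      // scalarized, whichever the cost comparison below finds cheapest.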
      bool NeedToScalarize;
      unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
      bool UseVectorIntrinsic =
          ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
      if (!UseVectorIntrinsic && NeedToScalarize) {
        scalarizeInstruction(&I);
        break;
      }

      for (unsigned Part = 0; Part < UF; ++Part) {
        SmallVector<Value *, 4> Args;
        for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
          Value *Arg = CI->getArgOperand(i);
          // Some intrinsics have a scalar argument - don't replace it with a
          // vector.
          if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) {
            VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i));
            Arg = VectorArg[Part];
          }
          Args.push_back(Arg);
        }

        Function *VectorF;
        if (UseVectorIntrinsic) {
          // Use vector version of the intrinsic.
          Type *TysForDecl[] = {CI->getType()};
          if (VF > 1)
            TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
          VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
        } else {
          // Use vector version of the library call.
          StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
          assert(!VFnName.empty() && "Vector function name is empty.");
          VectorF = M->getFunction(VFnName);
          if (!VectorF) {
            // Generate a declaration.
            FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
            VectorF =
                Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
            VectorF->copyAttributesFrom(F);
          }
        }
        assert(VectorF && "Can't create vector function.");

        SmallVector<OperandBundleDef, 1> OpBundles;
        CI->getOperandBundlesAsDefs(OpBundles);
        CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

        if (isa<FPMathOperator>(V))
          V->copyFastMathFlags(CI);

        Entry[Part] = V;
      }

      addMetadata(Entry, &I);
      break;
    }

    default:
      // All other instructions are unsupported. Scalarize them.
      scalarizeInstruction(&I);
      break;
    } // end of switch.
  } // end of for_each instr.
}

void InnerLoopVectorizer::updateAnalysis() {
  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  // Update the dominator tree information.
  assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
         "Entry does not dominate exit.");

  // We don't predicate stores by this point, so the vector body should be a
  // single loop.
  DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader);

  DT->addNewBlock(LoopMiddleBlock, LoopVectorBody);
  DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
  DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
  DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);

  DEBUG(DT->verifyDomTree());
}

/// \brief Check whether it is safe to if-convert this phi node.
///
/// Phi nodes with constant expressions that can trap are not safe to
/// if-convert.
static bool canIfConvertPHINodes(BasicBlock *BB) {
  for (Instruction &I : *BB) {
    auto *Phi = dyn_cast<PHINode>(&I);
    if (!Phi)
      return true;
    for (Value *V : Phi->incoming_values())
      if (auto *C = dyn_cast<Constant>(V))
        if (C->canTrap())
          return false;
  }
  return true;
}

bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
  if (!EnableIfConversion) {
    emitAnalysis(VectorizationReport() << "if-conversion is disabled");
    return false;
  }

  assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");

  // A list of pointers that we can safely read and write to.
  SmallPtrSet<Value *, 8> SafePointers;

  // Collect safe addresses.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (blockNeedsPredication(BB))
      continue;

    for (Instruction &I : *BB)
      if (auto *Ptr = getPointerOperand(&I))
        SafePointers.insert(Ptr);
  }

  // Collect the blocks that need predication.
  BasicBlock *Header = TheLoop->getHeader();
  for (BasicBlock *BB : TheLoop->blocks()) {
    // We don't support switch statements inside loops.
    if (!isa<BranchInst>(BB->getTerminator())) {
      emitAnalysis(VectorizationReport(BB->getTerminator())
                   << "loop contains a switch statement");
      return false;
    }

    // We must be able to predicate all blocks that need to be predicated.
    if (blockNeedsPredication(BB)) {
      if (!blockCanBePredicated(BB, SafePointers)) {
        emitAnalysis(VectorizationReport(BB->getTerminator())
                     << "control flow cannot be substituted for a select");
        return false;
      }
    } else if (BB != Header && !canIfConvertPHINodes(BB)) {
      emitAnalysis(VectorizationReport(BB->getTerminator())
                   << "control flow cannot be substituted for a select");
      return false;
    }
  }

  // We can if-convert this loop.
  return true;
}

bool LoopVectorizationLegality::canVectorize() {
  // We must have a loop in canonical form. Loops with indirectbr in them cannot
  // be canonicalized.
  if (!TheLoop->getLoopPreheader()) {
    emitAnalysis(VectorizationReport()
                 << "loop control flow is not understood by vectorizer");
    return false;
  }

  // FIXME: The code below is currently dead, since any loop that gets sent to
  // LoopVectorizationLegality is already an innermost loop.
  //
  // We can only vectorize innermost loops.
  if (!TheLoop->empty()) {
    emitAnalysis(VectorizationReport() << "loop is not the innermost loop");
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    emitAnalysis(VectorizationReport()
                 << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    emitAnalysis(VectorizationReport()
                 << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    emitAnalysis(VectorizationReport()
                 << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We need to have a loop header.
  DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
               << '\n');

  // Check if we can if-convert non-single-bb loops.
  unsigned NumBlocks = TheLoop->getNumBlocks();
  if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
    DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE.getBackedgeTakenCount();
  if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
    emitAnalysis(VectorizationReport()
                 << "could not determine number of loop iterations");
    DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
    return false;
  }

  // Check if we can vectorize the instructions and CFG in this loop.
  if (!canVectorizeInstrs()) {
    DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
    return false;
  }

  // Go over each instruction and look at memory deps.
  if (!canVectorizeMemory()) {
    DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
    return false;
  }

  DEBUG(dbgs() << "LV: We can vectorize this loop"
               << (LAI->getRuntimePointerChecking()->Need
                       ? " (with a runtime bound check)"
                       : "")
               << "!\n");

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved)
    InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());

  // Collect all instructions that are known to be uniform after vectorization.
  collectLoopUniforms();

  // Collect all instructions that are known to be scalar after vectorization.
  collectLoopScalars();

  unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
  if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
    SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;

  if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
    emitAnalysis(VectorizationReport()
                 << "Too many SCEV assumptions need to be made and checked "
                 << "at runtime");
    DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
    return false;
  }

  // Okay! We can vectorize. At this point we don't have any other mem analysis
  // which may limit our maximum vectorization factor, so just return true with
  // no restrictions.
  return true;
}

static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
  if (Ty->isPointerTy())
    return DL.getIntPtrType(Ty);

  // It is possible that chars or shorts overflow when we ask for the loop's
  // trip count; work around this by widening the type.
  if (Ty->getScalarSizeInBits() < 32)
    return Type::getInt32Ty(Ty->getContext());

  return Ty;
}

static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
  Ty0 = convertPointerToIntegerType(DL, Ty0);
  Ty1 = convertPointerToIntegerType(DL, Ty1);
  if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
    return Ty0;
  return Ty1;
}

/// \brief Check that the instruction has outside loop users and is not an
/// identified reduction variable.
static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
                               SmallPtrSetImpl<Value *> &AllowedExit) {
  // Reduction and induction instructions are allowed to have exit users. All
  // other instructions must not have external users.
  if (!AllowedExit.count(Inst))
    // Check that all of the users of the instruction are inside the loop.
    for (User *U : Inst->users()) {
      Instruction *UI = cast<Instruction>(U);
      // This user may be a reduction exit value.
      if (!TheLoop->contains(UI)) {
        DEBUG(dbgs() << "LV: Found an outside user for: " << *UI << '\n');
        return true;
      }
    }
  return false;
}

void LoopVectorizationLegality::addInductionPhi(
    PHINode *Phi, const InductionDescriptor &ID,
    SmallPtrSetImpl<Value *> &AllowedExit) {
  Inductions[Phi] = ID;
  Type *PhiTy = Phi->getType();
  const DataLayout &DL = Phi->getModule()->getDataLayout();

  // Get the widest type.
  if (!PhiTy->isFloatingPointTy()) {
    if (!WidestIndTy)
      WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
    else
      WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
  }

  // Int inductions are special because we only allow one IV.
  if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
      ID.getConstIntStepValue() && ID.getConstIntStepValue()->isOne() &&
      isa<Constant>(ID.getStartValue()) &&
      cast<Constant>(ID.getStartValue())->isNullValue()) {

    // Use the phi node with the widest type as induction. Use the last
    // one if there are multiple (no good reason for doing this other
    // than it is expedient). We've checked that it begins at zero and
    // steps by one, so this is a canonical induction variable.
    if (!Induction || PhiTy == WidestIndTy)
      Induction = Phi;
  }

  // Both the PHI node itself and the "post-increment" value feeding
  // back into the PHI node may have external users.
  AllowedExit.insert(Phi);
  AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));

  DEBUG(dbgs() << "LV: Found an induction variable.\n");
}

bool LoopVectorizationLegality::canVectorizeInstrs() {
  BasicBlock *Header = TheLoop->getHeader();

  // Look for the attribute signaling the absence of NaNs.
  Function &F = *Header->getParent();
  HasFunNoNaNAttr =
      F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";

  // For each block in the loop.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the instructions in the block and look for hazards.
    for (Instruction &I : *BB) {
      if (auto *Phi = dyn_cast<PHINode>(&I)) {
        Type *PhiTy = Phi->getType();
        // Check that this PHI type is allowed.
        if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
            !PhiTy->isPointerTy()) {
          emitAnalysis(VectorizationReport(Phi)
                       << "loop control flow is not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found a non-int non-pointer PHI.\n");
          return false;
        }

        // If this PHINode is not in the header block, then we know that we
        // can convert it to a select during if-conversion. No need to check
        // if the PHIs in this block are induction or reduction variables.
        if (BB != Header) {
          // Check that this instruction has no outside users or is an
          // identified reduction value with an outside user.
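          // Illustrative example (not from the original source): a value
          // merged by an if-converted PHI, e.g.
          //   for (...) { t = c ? x : y; }  use(t);
          // has an outside user and is rejected here unless it was already
          // identified as a reduction or induction and added to AllowedExit.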
          if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
            continue;
          emitAnalysis(VectorizationReport(Phi)
                       << "value could not be identified as "
                          "an induction or reduction variable");
          return false;
        }

        // We only allow if-converted PHIs with exactly two incoming values.
        if (Phi->getNumIncomingValues() != 2) {
          emitAnalysis(VectorizationReport(Phi)
                       << "control flow not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
          return false;
        }

        RecurrenceDescriptor RedDes;
        if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
          if (RedDes.hasUnsafeAlgebra())
            Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
          AllowedExit.insert(RedDes.getLoopExitInstr());
          Reductions[Phi] = RedDes;
          continue;
        }

        InductionDescriptor ID;
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
          addInductionPhi(Phi, ID, AllowedExit);
          if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
            Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
          continue;
        }

        if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) {
          FirstOrderRecurrences.insert(Phi);
          continue;
        }

        // As a last resort, coerce the PHI to an AddRec expression
        // and retry classifying it as an induction PHI.
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
          addInductionPhi(Phi, ID, AllowedExit);
          continue;
        }

        emitAnalysis(VectorizationReport(Phi)
                     << "value that could not be identified as "
                        "reduction is used outside the loop");
        DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
        return false;
      } // end of PHI handling

      // We handle calls that:
      //   * Are debug info intrinsics.
      //   * Have a mapping to an IR intrinsic.
      //   * Have a vector version available.
      auto *CI = dyn_cast<CallInst>(&I);
      if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
          !isa<DbgInfoIntrinsic>(CI) &&
          !(CI->getCalledFunction() && TLI &&
            TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
        emitAnalysis(VectorizationReport(CI)
                     << "call instruction cannot be vectorized");
        DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
        return false;
      }

      // Intrinsics such as powi, cttz, and ctlz are legal to vectorize if the
      // second argument is the same (i.e. loop invariant).
      if (CI && hasVectorInstrinsicScalarOpd(
                    getVectorIntrinsicIDForCall(CI, TLI), 1)) {
        auto *SE = PSE.getSE();
        if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
          emitAnalysis(VectorizationReport(CI)
                       << "intrinsic instruction cannot be vectorized");
          DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
          return false;
        }
      }

      // Check that the instruction return type is vectorizable.
      // Also, we can't vectorize extractelement instructions.
      if ((!VectorType::isValidElementType(I.getType()) &&
           !I.getType()->isVoidTy()) ||
          isa<ExtractElementInst>(I)) {
        emitAnalysis(VectorizationReport(&I)
                     << "instruction return type cannot be vectorized");
        DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
        return false;
      }

      // Check that the stored type is vectorizable.
      if (auto *ST = dyn_cast<StoreInst>(&I)) {
        Type *T = ST->getValueOperand()->getType();
        if (!VectorType::isValidElementType(T)) {
          emitAnalysis(VectorizationReport(ST)
                       << "store instruction cannot be vectorized");
          return false;
        }

        // FP instructions can allow unsafe algebra, thus vectorizable by
        // non-IEEE-754 compliant SIMD units.
        // This applies to floating-point math operations and calls, not memory
        // operations, shuffles, or casts, as they don't change precision or
        // semantics.
      } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
                 !I.hasUnsafeAlgebra()) {
        DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
        Hints->setPotentiallyUnsafe();
      }

      // Reduction instructions are allowed to have exit users.
      // All other instructions must not have external users.
      if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
        emitAnalysis(VectorizationReport(&I)
                     << "value cannot be used outside the loop");
        return false;
      }

    } // next instr.
  }

  if (!Induction) {
    DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
    if (Inductions.empty()) {
      emitAnalysis(VectorizationReport()
                   << "loop induction variable could not be identified");
      return false;
    }
  }

  // Now we know the widest induction type, check if our found induction
  // is the same size. If it's not, unset it here and InnerLoopVectorizer
  // will create another.
  if (Induction && WidestIndTy != Induction->getType())
    Induction = nullptr;

  return true;
}

void LoopVectorizationLegality::collectLoopScalars() {
  // If an instruction is uniform after vectorization, it will remain scalar.
  Scalars.insert(Uniforms.begin(), Uniforms.end());

  // Collect the getelementptr instructions that will not be vectorized. A
  // getelementptr instruction is only vectorized if it is used for a legal
  // gather or scatter operation.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
        Scalars.insert(GEP);
        continue;
      }
      auto *Ptr = getPointerOperand(&I);
      if (!Ptr)
        continue;
      auto *GEP = getGEPInstruction(Ptr);
      if (GEP && isLegalGatherOrScatter(&I))
        Scalars.erase(GEP);
    }

  // An induction variable will remain scalar if all users of the induction
  // variable and induction variable update remain scalar.
  auto *Latch = TheLoop->getLoopLatch();
  for (auto &Induction : *getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are scalar after
    // vectorization.
    auto ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Scalars.count(I);
    });
    if (!ScalarInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // scalar after vectorization.
    auto ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == Ind || !TheLoop->contains(I) || Scalars.count(I);
    });
    if (!ScalarIndUpdate)
      continue;

    // The induction variable and its update instruction will remain scalar.
    Scalars.insert(Ind);
    Scalars.insert(IndUpdate);
  }
}

void LoopVectorizationLegality::collectLoopUniforms() {
  // We now know that the loop is vectorizable!
  // Collect instructions inside the loop that will remain uniform after
  // vectorization.

  // Global values, params and instructions outside of the current loop are
  // out of scope.
  auto isOutOfScope = [&](Value *V) -> bool {
    Instruction *I = dyn_cast<Instruction>(V);
    return (!I || !TheLoop->contains(I));
  };

  SetVector<Instruction *> Worklist;
  BasicBlock *Latch = TheLoop->getLoopLatch();
  // Start with the conditional branch.
  if (!isOutOfScope(Latch->getTerminator()->getOperand(0))) {
    Instruction *Cmp = cast<Instruction>(Latch->getTerminator()->getOperand(0));
    Worklist.insert(Cmp);
    DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
  }

  // Add all consecutive pointer values; these values will be uniform after
  // vectorization (and subsequent cleanup). Although non-consecutive, we also
  // add the pointer operands of interleaved accesses since they are treated
  // like consecutive pointers during vectorization.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      Instruction *Ptr = nullptr;
      if (I.getType()->isPointerTy() && isConsecutivePtr(&I))
        Ptr = &I;
      else if (isAccessInterleaved(&I))
        Ptr = cast<Instruction>(getPointerOperand(&I));
      else
        continue;
      Worklist.insert(Ptr);
      DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ptr << "\n");
    }

  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should be either already inside Worklist, or
  // out of scope. This ensures that a uniform instruction will only be
  // used by other uniform instructions or by out-of-scope instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      if (isOutOfScope(OV))
        continue;
      auto *OI = cast<Instruction>(OV);
      if (all_of(OI->users(), [&](User *U) -> bool {
            return isOutOfScope(U) || Worklist.count(cast<Instruction>(U));
          })) {
        Worklist.insert(OI);
        DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
      }
    }
  }

  // For an instruction to be added to the Worklist above, all of its users
  // inside the loop must already be in the Worklist. This condition can never
  // hold for PHI nodes, which are always part of a dependence cycle: every
  // instruction in such a cycle waits for the others in the cycle to be added
  // first, so none of them would ever make it into the Worklist.
  // That is why we process PHIs separately.
  for (auto &Induction : *getInductionVars()) {
    auto *PN = Induction.first;
    auto *UpdateV = PN->getIncomingValueForBlock(TheLoop->getLoopLatch());
    if (all_of(PN->users(),
               [&](User *U) -> bool {
                 return U == UpdateV || isOutOfScope(U) ||
                        Worklist.count(cast<Instruction>(U));
               }) &&
        all_of(UpdateV->users(), [&](User *U) -> bool {
          return U == PN || isOutOfScope(U) ||
                 Worklist.count(cast<Instruction>(U));
        })) {
      Worklist.insert(cast<Instruction>(PN));
      Worklist.insert(cast<Instruction>(UpdateV));
      DEBUG(dbgs() << "LV: Found uniform instruction: " << *PN << "\n");
      DEBUG(dbgs() << "LV: Found uniform instruction: " << *UpdateV << "\n");
    }
  }

  Uniforms.insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationLegality::canVectorizeMemory() {
  LAI = &(*GetLAA)(*TheLoop);
  InterleaveInfo.setLAI(LAI);
  auto &OptionalReport = LAI->getReport();
  if (OptionalReport)
    emitAnalysis(VectorizationReport(*OptionalReport));
  if (!LAI->canVectorizeMemory())
    return false;

  if (LAI->hasStoreToLoopInvariantAddress()) {
    emitAnalysis(
        VectorizationReport()
        << "write to a loop invariant address could not be vectorized");
    DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
    return false;
  }

  Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
  PSE.addPredicate(LAI->getPSE().getUnionPredicate());

  return true;
}

bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
  Value *In0 = const_cast<Value *>(V);
  PHINode *PN = dyn_cast_or_null<PHINode>(In0);
  if (!PN)
    return false;

  return Inductions.count(PN);
}

bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
  return FirstOrderRecurrences.count(Phi);
}

bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
  return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
}

bool LoopVectorizationLegality::blockCanBePredicated(
    BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  for (Instruction &I : *BB) {
    // Check that we don't have a constant expression that can trap as operand.
    for (Value *Operand : I.operands()) {
      if (auto *C = dyn_cast<Constant>(Operand))
        if (C->canTrap())
          return false;
    }
    // We might be able to hoist the load.
    if (I.mayReadFromMemory()) {
      auto *LI = dyn_cast<LoadInst>(&I);
      if (!LI)
        return false;
      if (!SafePtrs.count(LI->getPointerOperand())) {
        if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) ||
            isLegalMaskedGather(LI->getType())) {
          MaskedOp.insert(LI);
          continue;
        }
        // !llvm.mem.parallel_loop_access implies if-conversion safety.
        if (IsAnnotatedParallel)
          continue;
        return false;
      }
    }

    if (I.mayWriteToMemory()) {
      auto *SI = dyn_cast<StoreInst>(&I);
      // We only support predication of stores in basic blocks with one
      // predecessor.
      if (!SI)
        return false;

      // Build a masked store if it is legal for the target.
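      // Illustrative example (not from the original source): the conditional
      // store in
      //   for (i...) if (c[i]) a[i] = x;
      // can be emitted as an llvm.masked.store whose mask is the block mask,
      // when the target supports a masked store for this type.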
      if (isLegalMaskedStore(SI->getValueOperand()->getType(),
                             SI->getPointerOperand()) ||
          isLegalMaskedScatter(SI->getValueOperand()->getType())) {
        MaskedOp.insert(SI);
        continue;
      }

      bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0);
      bool isSinglePredecessor = SI->getParent()->getSinglePredecessor();

      if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr ||
          !isSinglePredecessor)
        return false;
    }
    if (I.mayThrow())
      return false;

    // The instructions below can trap.
    switch (I.getOpcode()) {
    default:
      continue;
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      return false;
    }
  }

  return true;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {

  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!LI && !SI)
        continue;

      Value *Ptr = getPointerOperand(&I);
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());

      // An alignment of 0 means target ABI alignment.
      unsigned Align = LI ? LI->getAlignment() : SI->getAlignment();
      if (!Align)
        Align = DL.getABITypeAlignment(PtrTy->getElementType());

      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];  // (1)
//                                A[i] = b;  // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order.
// Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    const ValueToValueMap &Strides) {
  DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup *Group = nullptr;
    if (isStrided(DesB.Stride)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a;  // (1)
      //                    | A[i-1] = b;  // (2) |
      //                      A[i-3] = c;  // (3)
      //                      A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {

        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup *StoreGroup = getInterleaveGroup(A);
          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory())
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // Ignore A if either A or B is in a predicated block. Although we
      // currently prevent group formation for predicated accesses, we may be
      // able to relax this limitation in the future once we handle more
      // complicated blocks.
      if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Align)) {
        DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                     << "    into the interleave group with" << *B << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  } // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor())
      releaseGroup(Group);

  // If there is a non-reversed interleaved load group with gaps, we will need
  // to execute at least one scalar epilogue iteration. This ensures that we
  // don't speculatively access memory out-of-bounds. Note that we only need
  // to look for a member at index factor - 1, since every group must have a
  // member at index zero.
  for (InterleaveGroup *Group : LoadGroups)
    if (!Group->getMember(Group->getFactor() - 1)) {
      if (Group->isReverse()) {
        releaseGroup(Group);
      } else {
        DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
        RequiresScalarEpilogue = true;
      }
    }
}

LoopVectorizationCostModel::VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
  // Width 1 means no vectorization.
  VectorizationFactor Factor = {1U, 0U};
  if (OptForSize && Legal->getRuntimePointerChecking()->Need) {
    emitAnalysis(
        VectorizationReport()
        << "runtime pointer checks needed. Enable vectorization of this "
           "loop with '#pragma clang loop vectorize(enable)' when "
           "compiling with -Os/-Oz");
    DEBUG(dbgs()
          << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
    return Factor;
  }

  if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
    emitAnalysis(
        VectorizationReport()
        << "store that is conditionally executed prevents vectorization");
    DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
    return Factor;
  }

  // Find the trip count.
  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');

  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);
  unsigned MaxSafeDepDist = -1U;

  // Get the maximum safe dependence distance in bits computed by LAA. If the
  // loop contains any interleaved accesses, we divide the dependence distance
  // by the maximum interleave factor of all interleaved groups. Note that
  // although the division ensures correctness, this is a fairly conservative
  // computation because the maximum distance computed by LAA may not involve
  // any of the interleaved accesses.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist =
        Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor();

  WidestRegister =
      ((WidestRegister < MaxSafeDepDist) ? WidestRegister : MaxSafeDepDist);
  unsigned MaxVectorSize = WidestRegister / WidestType;

  DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
               << WidestType << " bits.\n");
  DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister
               << " bits.\n");

  if (MaxVectorSize == 0) {
    DEBUG(dbgs() << "LV: The target has no vector registers.\n");
    MaxVectorSize = 1;
  }

  assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
                                " into one vector!");

  unsigned VF = MaxVectorSize;
  if (MaximizeBandwidth && !OptForSize) {
    // Collect all viable vectorization factors.
    SmallVector<unsigned, 8> VFs;
    unsigned NewMaxVectorSize = WidestRegister / SmallestType;
    for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2)
      VFs.push_back(VS);

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than existing
    // ones.
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
    for (int i = RUs.size() - 1; i >= 0; --i) {
      if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
        VF = VFs[i];
        break;
      }
    }
  }

  // If we optimize the program for size, avoid creating the tail loop.
  if (OptForSize) {
    // If we are unable to calculate the trip count, then don't try to
    // vectorize.
    if (TC < 2) {
      emitAnalysis(
          VectorizationReport()
          << "unable to calculate the loop count due to complex control flow");
      DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
      return Factor;
    }

    // Find the maximum SIMD width that can fit within the trip count.
    VF = TC % MaxVectorSize;

    if (VF == 0)
      VF = MaxVectorSize;
    else {
      // If the trip count that we found modulo the vectorization factor is not
      // zero then we require a tail.
      emitAnalysis(VectorizationReport()
                   << "cannot optimize for size and vectorize at the "
                      "same time. Enable vectorization of this loop "
                      "with '#pragma clang loop vectorize(enable)' "
                      "when compiling with -Os/-Oz");
      DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
      return Factor;
    }
  }

  int UserVF = Hints->getWidth();
  if (UserVF != 0) {
    assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
    DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");

    Factor.Width = UserVF;
    return Factor;
  }

  float Cost = expectedCost(1).first;
#ifndef NDEBUG
  const float ScalarCost = Cost;
#endif /* NDEBUG */
  unsigned Width = 1;
  DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  // Ignore scalar width, because the user explicitly wants vectorization.
  if (ForceVectorization && VF > 1) {
    Width = 2;
    Cost = expectedCost(Width).first / (float)Width;
  }

  for (unsigned i = 2; i <= VF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
    VectorizationCostTy C = expectedCost(i);
    float VectorCost = C.first / (float)i;
    DEBUG(dbgs() << "LV: Vector loop of width " << i
                 << " costs: " << (int)VectorCost << ".\n");
    if (!C.second && !ForceVectorization) {
      DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }
    if (VectorCost < Cost) {
      Cost = VectorCost;
      Width = i;
    }
  }

  DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
        << "LV: Vectorization seems to be not beneficial, "
        << "but was forced by a user.\n");
  DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
  Factor.Width = Width;
  Factor.Cost = Width * Cost;
  return Factor;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : *BB) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // consecutive. However, we do want to take consecutive stores/loads of
      // pointer vectors into account.
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I))
        continue;

      MinWidth = std::min(MinWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
      MaxWidth = std::max(MaxWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
    }
  }

  return {MinWidth, MaxWidth};
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
                                                           unsigned VF,
                                                           unsigned LoopCost) {

  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop overhead.
  // There are many micro-architectural considerations that we can't predict
  // at this level. For example, frontend pressure (on decode or fetch) due to
  // code size, or the number and capabilities of the execution ports.
  //
  // We use the following heuristics to select the interleave count:
  //  1. If the code has reductions, then we interleave to break the cross
  //     iteration dependency.
  //  2. If the loop is really small, then we interleave to reduce the loop
  //     overhead.
  //  3. We don't interleave if we think that we will spill registers to memory
  //     due to the increased register pressure.

  // When we optimize for size, we don't interleave.
  if (OptForSize)
    return 1;

  // We used the distance for the interleave count.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  // Do not interleave loops with a relatively small trip count.
  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
    return 1;

  unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
  DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
               << " registers\n");

  if (VF == 1) {
    if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumScalarRegs;
  } else {
    if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumVectorRegs;
  }

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants, so assume that we have at least one
  // instruction that uses at least one register.
  R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
  R.NumInstructions = std::max(R.NumInstructions, 1U);

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want a power-of-two interleave count to simplify any
  // addressing operations or alignment considerations.
  unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
                              R.MaxLocalUsers);

  // Don't count the induction variable as interleaved.
  if (EnableIndVarRegisterHeur)
    IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
                       std::max(1U, (R.MaxLocalUsers - 1)));

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);

  // Check if the user has overridden the max.
  if (VF == 1) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If we did not calculate the cost for VF (because the user selected the VF)
  // then we calculate the cost of VF here.
  if (LoopCost == 0)
    LoopCost = expectedCost(VF).first;

  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target allows.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else if (IC < 1)
    IC = 1;

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
  if (VF > 1 && Legal->getReductionVars()->size()) {
    DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF == 1 && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead and
  // potentially expose ILP opportunities.
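  // Worked example (assumed values, not from the original source): with
  // SmallLoopCost = 20 and a loop of cost 4, SmallIC below becomes
  // min(IC, PowerOf2Floor(20 / 4)) = min(IC, 4), keeping the loop overhead
  // around 5% of the total cost.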
  DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1 and we use the cost model
    // to estimate the cost of the loop and interleave until the cost of the
    // loop overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit, by default to 2, so the
    // critical path only gets increased by one reduction operation.
    if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
    return SmallIC;
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  bool HasReductions = (Legal->getReductionVars()->size() > 0);
  if (TTI.enableAggressiveInterleaving(HasReductions)) {
    DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is a
  // very rough estimation. We scan the loop in topological order and assign a
  // number to each instruction. We use RPO to ensure that defs are met before
  // their users. We assume that each instruction that has in-loop users starts
  // an interval. We record every time that an in-loop value is used, so we
  // have a list of the first and last occurrences of each instruction. Next,
  // we transpose this data structure into a multi map that holds the list of
  // intervals that *end* at a specific location. This multi map allows us to
  // perform a linear search. We scan the instructions linearly and record each
  // time that a new interval starts, by placing it in a set. If we find this
  // value in the multi-map then we remove it from the set. The max register
  // usage is the maximum size of the set. We also search for instructions that
  // are defined outside the loop, but are used inside the loop. We need this
  // number separately from the max-interval usage number because when we
  // unroll, loop-invariant values do not take additional registers.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;
  RU.NumInstructions = 0;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  typedef DenseMap<Instruction *, unsigned> IntervalMap;

  // Maps indices to the instructions they were assigned to.
  DenseMap<unsigned, Instruction *> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
  SmallSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  unsigned Index = 0;
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    RU.NumInstructions += BB->size();
    for (Instruction &I : *BB) {
      IdxToInstr[Index++] = &I;

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = Index;
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  typedef SmallVector<Instruction *, 2> InstrList;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallSet<Instruction *, 8> OpenIntervals;

  // Get the size of the widest register.
  unsigned MaxSafeDepDist = -1U;
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
  unsigned WidestRegister =
      std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);

  DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
    if (Ty->isTokenTy())
      return 0U;
    unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
    return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
  };

  for (unsigned i = 0; i < Index; ++i) {
    Instruction *I = IdxToInstr[i];

    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;
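    // Illustratively, with a 128-bit widest register, an open interval of
    // scalar type i32 counts as max(1, 8 * 32 / 128) = 2 registers at VF = 8,
    // and as a single register at VF = 4.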
    // For each VF find the maximum usage of registers.
    for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
      if (VFs[j] == 1) {
        MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
        continue;
      }

      // Count the number of live intervals.
      unsigned RegUsage = 0;
      for (auto Inst : OpenIntervals) {
        // Skip ignored values for VF > 1.
        if (VecValuesToIgnore.count(Inst))
          continue;
        RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
      }
      MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
    }

    DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
                 << OpenIntervals.size() << '\n');

    // Add the current instruction to the list of open intervals.
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    unsigned Invariant = 0;
    if (VFs[i] == 1)
      Invariant = LoopInvariants.size();
    else {
      for (auto Inst : LoopInvariants)
        Invariant += GetRegUsage(Inst->getType(), VFs[i]);
    }

    DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
    DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
    DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
    DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n');

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::expectedCost(unsigned VF) {
  VectorizationCostTy Cost;

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    VectorizationCostTy BlockCost;

    // For each instruction in the old loop.
    for (Instruction &I : *BB) {
      // Skip dbg intrinsics.
      if (isa<DbgInfoIntrinsic>(I))
        continue;

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      VectorizationCostTy C = getInstructionCost(&I, VF);

      // Check if we should override the cost.
      if (ForceTargetInstructionCost.getNumOccurrences() > 0)
        C.first = ForceTargetInstructionCost;

      BlockCost.first += C.first;
      BlockCost.second |= C.second;
      DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF "
                   << VF << " For instruction: " << I << '\n');
    }

    // We assume that if-converted blocks have a 50% chance of being executed.
    // When the code is scalar, some of the blocks are avoided due to control
    // flow. When the code is vectorized we execute all code paths.
    if (VF == 1 && Legal->blockNeedsPredication(BB))
      BlockCost.first /= 2;

    Cost.first += BlockCost.first;
    Cost.second |= BlockCost.second;
  }

  return Cost;
}

/// \brief Check whether the address computation for a non-consecutive memory
/// access looks like an unlikely candidate for being merged into the indexing
/// mode.
///
/// We look for a GEP which has one index that is an induction variable and all
/// other indices are loop invariant. If the stride of this access is also
/// within a small bound we decide that this address computation can likely be
/// merged into the addressing mode.
/// In all other cases, we identify the address computation as complex.
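///
/// As an illustrative (hypothetical) example, the address computation in
///   %gep = getelementptr inbounds i32, i32* %base, i64 %iv
/// where %iv advances by a small constant step can usually be folded into
/// the load/store addressing mode, whereas a pointer whose SCEV is not a
/// small constant-step recurrence is classified as complex.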
static bool isLikelyComplexAddressComputation(Value *Ptr,
                                              LoopVectorizationLegality *Legal,
                                              ScalarEvolution *SE,
                                              const Loop *TheLoop) {
  auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  if (!Gep)
    return true;

  // We are looking for a GEP with all loop-invariant indices except for one
  // which should be an induction variable.
  unsigned NumOperands = Gep->getNumOperands();
  for (unsigned i = 1; i < NumOperands; ++i) {
    Value *Opd = Gep->getOperand(i);
    if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
        !Legal->isInductionVariable(Opd))
      return true;
  }

  // Now we know we have a GEP ptr, %inv, %ind, %inv. Make sure that the step
  // can likely be merged into the address computation.
  const int64_t MaxMergeDistance = 64;

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Ptr));
  if (!AddRec)
    return true;

  // Check that the step is a constant.
  const SCEV *Step = AddRec->getStepRecurrence(*SE);
  const auto *C = dyn_cast<SCEVConstant>(Step);
  if (!C)
    return true;

  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return true;

  int64_t StepVal = APStepVal.getSExtValue();

  // A step of large magnitude is unlikely to be folded into the addressing
  // mode, regardless of its direction.
  return StepVal > MaxMergeDistance || StepVal < -MaxMergeDistance;
}

static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  return Legal->hasStride(I->getOperand(0)) ||
         Legal->hasStride(I->getOperand(1));
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
  // If we know that this instruction will remain uniform, check the cost of
  // the scalar version.
  if (Legal->isUniformAfterVectorization(I))
    VF = 1;

  Type *VectorTy;
  unsigned C = getInstructionCost(I, VF, VectorTy);

  bool TypeNotScalarized =
      VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF;
  return VectorizationCostTy(C, TypeNotScalarized);
}

unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
                                                        unsigned VF,
                                                        Type *&VectorTy) {
  Type *RetTy = I->getType();
  if (VF > 1 && MinBWs.count(I))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  VectorTy = ToVectorTy(RetTy, VF);
  auto SE = PSE.getSE();

  // TODO: We need to estimate the cost of intrinsic calls.
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    // We mark this instruction as zero-cost because the cost of GEPs in
    // vectorized code depends on whether the corresponding memory instruction
    // is scalarized or not. Therefore, we handle GEPs with the memory
    // instruction cost.
    return 0;
  case Instruction::Br: {
    return TTI.getCFInstrCost(I->getOpcode());
  }
  case Instruction::PHI: {
    auto *Phi = cast<PHINode>(I);

    // First-order recurrences are replaced by vector shuffles inside the loop.
    if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
      return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
                                VectorTy, VF - 1, VectorTy);
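
    // For instance (illustrative IR), at VF = 4 the recurrence value is
    // spliced out of the previous and current vector iterations with a single
    // shuffle:
    //   %r = shufflevector <4 x i32> %prev, <4 x i32> %cur,
    //                      <4 x i32> <i32 3, i32 4, i32 5, i32 6>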

    // TODO: IF-converted IFs become selects.
    return 0;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Since we will replace the stride by 1 the multiplication should go away.
    if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
      return 0;

    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueProperties Op1VP =
        TargetTransformInfo::OP_None;
    TargetTransformInfo::OperandValueProperties Op2VP =
        TargetTransformInfo::OP_None;
    Value *Op2 = I->getOperand(1);

    // Check for a splat or for a non-uniform vector of constants.
    if (auto *CInt = dyn_cast<ConstantInt>(Op2)) {
      if (CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;
      Op2VK = TargetTransformInfo::OK_UniformConstantValue;
    } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
      Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      if (Constant *SplatValue = cast<Constant>(Op2)->getSplatValue()) {
        auto *CInt = dyn_cast<ConstantInt>(SplatValue);
        if (CInt && CInt->getValue().isPowerOf2())
          Op2VP = TargetTransformInfo::OP_PowerOf2;
        Op2VK = TargetTransformInfo::OK_UniformConstantValue;
      }
    } else if (Legal->isUniform(Op2)) {
      Op2VK = TargetTransformInfo::OK_UniformValue;
    }

    return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK,
                                      Op1VP, Op2VP);
  }
  case Instruction::Select: {
    auto *SI = cast<SelectInst>(I);
    const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
    bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
    Type *CondTy = SI->getCondition()->getType();
    if (!ScalarCond)
      CondTy = VectorType::get(CondTy, VF);

    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ValTy = I->getOperand(0)->getType();
    auto *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
    auto It = MinBWs.find(Op0AsInstruction);
    if (VF > 1 && It != MinBWs.end())
      ValTy = IntegerType::get(ValTy->getContext(), It->second);
    VectorTy = ToVectorTy(ValTy, VF);
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy);
  }
  case Instruction::Store:
  case Instruction::Load: {
    auto *SI = dyn_cast<StoreInst>(I);
    auto *LI = dyn_cast<LoadInst>(I);
    Type *ValTy = (SI ? SI->getValueOperand()->getType() : LI->getType());
    VectorTy = ToVectorTy(ValTy, VF);

    unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment();
    unsigned AS =
        SI ? SI->getPointerAddressSpace() : LI->getPointerAddressSpace();
    Value *Ptr = getPointerOperand(I);

    // We add the cost of address computation here instead of with the GEP
    // instruction because only here do we know whether the operation is
    // scalarized.
    if (VF == 1)
      return TTI.getAddressComputationCost(VectorTy) +
             TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);

    if (LI && Legal->isUniform(Ptr)) {
      // Scalar load + broadcast.
      unsigned Cost = TTI.getAddressComputationCost(ValTy->getScalarType());
      Cost += TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                                  Alignment, AS);
      return Cost +
             TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, ValTy);
    }

    // For an interleaved access, calculate the total cost of the whole
    // interleave group.
    if (Legal->isAccessInterleaved(I)) {
      auto Group = Legal->getInterleavedAccessGroup(I);
      assert(Group && "Fail to get an interleaved access group.");

      // Only calculate the cost once at the insert position.
      if (Group->getInsertPos() != I)
        return 0;

      unsigned InterleaveFactor = Group->getFactor();
      Type *WideVecTy =
          VectorType::get(VectorTy->getVectorElementType(),
                          VectorTy->getVectorNumElements() * InterleaveFactor);

      // Holds the indices of existing members in an interleaved load group.
      // An interleaved store group doesn't need this as it doesn't allow gaps.
      SmallVector<unsigned, 4> Indices;
      if (LI) {
        for (unsigned i = 0; i < InterleaveFactor; i++)
          if (Group->getMember(i))
            Indices.push_back(i);
      }

      // Calculate the cost of the whole interleaved group.
      unsigned Cost = TTI.getInterleavedMemoryOpCost(
          I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
          Group->getAlignment(), AS);

      if (Group->isReverse())
        Cost +=
            Group->getNumMembers() *
            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);

      // FIXME: An interleaved load group with a huge gap could be even more
      // expensive than scalar operations. Then we could ignore such a group
      // and use scalar operations instead.
      return Cost;
    }

    // Check if the memory instruction will be scalarized.
    int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
    bool UseGatherOrScatter =
        (ConsecutiveStride == 0) && Legal->isLegalGatherOrScatter(I);

    bool Reverse = ConsecutiveStride < 0;
    const DataLayout &DL = I->getModule()->getDataLayout();
    uint64_t ScalarAllocatedSize = DL.getTypeAllocSize(ValTy);
    uint64_t VectorElementSize = DL.getTypeStoreSize(VectorTy) / VF;
    if ((!ConsecutiveStride && !UseGatherOrScatter) ||
        ScalarAllocatedSize != VectorElementSize) {
      bool IsComplexComputation =
          isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop);
      unsigned Cost = 0;
      // The cost of extracting from the value vector and pointer vector.
      Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
      for (unsigned i = 0; i < VF; ++i) {
        // The cost of extracting the pointer operand.
        Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, PtrTy, i);
        // In case of STORE, the cost of ExtractElement from the vector.
        // In case of LOAD, the cost of InsertElement into the returned
        // vector.
        Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement
                                          : Instruction::InsertElement,
                                       VectorTy, i);
      }
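
      // Illustratively, a scalarized store at VF = 4 pays for four pointer
      // extracts and four value extracts in the loop above, plus four address
      // computations and four scalar stores below.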

      // The cost of the scalar loads/stores.
      Cost += VF * TTI.getAddressComputationCost(PtrTy, IsComplexComputation);
      Cost += VF *
              TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                                  Alignment, AS);
      return Cost;
    }

    unsigned Cost = TTI.getAddressComputationCost(VectorTy);
    if (UseGatherOrScatter) {
      assert(ConsecutiveStride == 0 &&
             "Gather/Scatter are not used for consecutive stride");
      return Cost +
             TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
                                        Legal->isMaskRequired(I), Alignment);
    }
    // Wide loads/stores.
    if (Legal->isMaskRequired(I))
      Cost +=
          TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
    else
      Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);

    if (Reverse)
      Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
    return Cost;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    // We optimize the truncation of induction variables. The cost of these
    // is the same as the scalar operation.
    if (I->getOpcode() == Instruction::Trunc &&
        Legal->isInductionVariable(I->getOperand(0)))
      return TTI.getCastInstrCost(I->getOpcode(), I->getType(),
                                  I->getOperand(0)->getType());

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF);
    if (VF > 1 && MinBWs.count(I)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (I->getOpcode() == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      } else if (I->getOpcode() == Instruction::ZExt ||
                 I->getOpcode() == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      }
    }

    return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy);
  }
  case Instruction::Call: {
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
    if (getVectorIntrinsicIDForCall(CI, TLI))
      return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
    return CallCost;
  }
  default: {
    // We are scalarizing the instruction. Return the cost of the scalar
    // instruction, plus the cost of inserting and extracting vector
    // elements, times the vector width.
    unsigned Cost = 0;

    if (!RetTy->isVoidTy() && VF != 1) {
      unsigned InsCost =
          TTI.getVectorInstrCost(Instruction::InsertElement, VectorTy);
      unsigned ExtCost =
          TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy);
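
      // As an illustrative example, an unknown two-operand instruction at
      // VF = 4 would be charged 4 * (InsCost + 2 * ExtCost) for moving values
      // into and out of vectors, plus four copies of the scalar operation
      // itself (estimated below using the cost of a 'mul').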

      // The cost of inserting the results plus extracting each one of the
      // operands.
      Cost += VF * (InsCost + ExtCost * I->getNumOperands());
    }

    // The cost of executing VF copies of the scalar instruction. This opcode
    // is unknown. Assume that it is the same as 'mul'.
    Cost += VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy);
    return Cost;
  }
  } // end of switch.
}

char LoopVectorize::ID = 0;
static const char lv_name[] = "Loop Vectorization";
INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)

namespace llvm {
Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
  return new LoopVectorize(NoUnrolling, AlwaysVectorize);
}
}

bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
  // Check if the pointer operand of a load or store instruction is
  // consecutive.
  if (auto *Ptr = getPointerOperand(Inst))
    return Legal->isConsecutivePtr(Ptr);
  return false;
}

void LoopVectorizationCostModel::collectValuesToIgnore() {
  // Ignore ephemeral values.
  CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);

  // Ignore type-promoting instructions we identified during reduction
  // detection.
  for (auto &Reduction : *Legal->getReductionVars()) {
    RecurrenceDescriptor &RedDes = Reduction.second;
    SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }

  // Insert values known to be scalar into VecValuesToIgnore.
  for (auto *BB : TheLoop->getBlocks())
    for (auto &I : *BB)
      if (Legal->isScalarAfterVectorization(&I))
        VecValuesToIgnore.insert(&I);
}

void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
                                             bool IfPredicateStore) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");

  // Holds vector parameters or scalars, in case of uniform values.
  SmallVector<VectorParts, 4> Params;

  setDebugLocFromInst(Builder, Instr);

  // Find all of the vectorized parameters.
  for (Value *SrcOp : Instr->operands()) {
    // If we are accessing the old induction variable, use the new one.
    if (SrcOp == OldInduction) {
      Params.push_back(getVectorValue(SrcOp));
      continue;
    }

    // Try using previously calculated values.
    auto *SrcInst = dyn_cast<Instruction>(SrcOp);

    // If the src is an instruction that appeared earlier in the basic block
    // then it should already be vectorized.
    if (SrcInst && OrigLoop->contains(SrcInst)) {
      assert(WidenMap.has(SrcInst) && "Source operand is unavailable");

      // The parameter is a vector value from earlier.
      Params.push_back(WidenMap.get(SrcInst));
    } else {
      // The parameter is a scalar from outside the loop. Maybe even a
      // constant.
      VectorParts Scalars;
      Scalars.append(UF, SrcOp);
      Params.push_back(Scalars);
    }
  }

  assert(Params.size() == Instr->getNumOperands() &&
         "Invalid number of operands");

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Value *UndefVec = IsVoidRetTy ? nullptr : UndefValue::get(Instr->getType());

  // Create a new entry in the WidenMap and initialize it to Undef or Null.
  VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);

  VectorParts Cond;
  if (IfPredicateStore) {
    assert(Instr->getParent()->getSinglePredecessor() &&
           "Only support single predecessor blocks");
    Cond = createEdgeMask(Instr->getParent()->getSinglePredecessor(),
                          Instr->getParent());
  }

  // For each vector unroll 'part':
  for (unsigned Part = 0; Part < UF; ++Part) {
    // For each scalar that we create:

    // Start an "if (pred) a[i] = ..." block.
    Value *Cmp = nullptr;
    if (IfPredicateStore) {
      if (Cond[Part]->getType()->isVectorTy())
        Cond[Part] =
            Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0));
      Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part],
                               ConstantInt::get(Cond[Part]->getType(), 1));
    }

    Instruction *Cloned = Instr->clone();
    if (!IsVoidRetTy)
      Cloned->setName(Instr->getName() + ".cloned");

    // Replace the operands of the cloned instruction with extracted scalars.
    for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
      Value *Op = Params[op][Part];
      Cloned->setOperand(op, Op);
    }

    // Place the cloned scalar in the new loop.
    Builder.Insert(Cloned);

    // If we just cloned a new assumption, add it to the assumption cache.
    if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
      if (II->getIntrinsicID() == Intrinsic::assume)
        AC->registerAssumption(II);

    // If the original scalar returns a value we need to place it in a vector
    // so that future users will be able to use it.
    if (!IsVoidRetTy)
      VecResults[Part] = Cloned;

    // End if-block.
    if (IfPredicateStore)
      PredicatedStores.push_back(std::make_pair(cast<StoreInst>(Cloned), Cmp));
  }
}

void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
  auto *SI = dyn_cast<StoreInst>(Instr);
  bool IfPredicateStore = (SI && Legal->blockNeedsPredication(SI->getParent()));

  return scalarizeInstruction(Instr, IfPredicateStore);
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
                                        Instruction::BinaryOps BinOp) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
  Type *Ty = Val->getType();
  assert(!Ty->isVectorTy() && "Val must be a scalar");

  if (Ty->isFloatingPointTy()) {
    Constant *C = ConstantFP::get(Ty, (double)StartIdx);

    // Floating point operations had to be 'fast' to enable the unrolling.
    Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
    return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
  }
  Constant *C = ConstantInt::get(Ty, StartIdx);
  return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
}

static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  SmallVector<Metadata *, 4> MDs;
  // Reserve first location for self reference to the LoopID metadata node.
  MDs.push_back(nullptr);

  bool IsUnrollMetadata = false;
  MDNode *LoopID = L->getLoopID();
  if (LoopID) {
    // First find existing loop unrolling disable metadata.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
      if (MD) {
        const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata =
            S && S->getString().startswith("llvm.loop.unroll.disable");
      }
      MDs.push_back(LoopID->getOperand(i));
    }
  }

  if (!IsUnrollMetadata) {
    // Add runtime unroll disable metadata.
    LLVMContext &Context = L->getHeader()->getContext();
    SmallVector<Metadata *, 1> DisableOperands;
    DisableOperands.push_back(
        MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
    MDNode *DisableNode = MDNode::get(Context, DisableOperands);
    MDs.push_back(DisableNode);
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);
    L->setLoopID(NewLoopID);
  }
}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert(L->empty() && "Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  DEBUG(dbgs() << "\nLV: Checking a loop in \""
               << L->getHeader()->getParent()->getName() << "\" from "
               << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, DisableUnrolling, *ORE);

  DEBUG(dbgs() << "LV: Loop hints:"
               << " force="
               << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                       ? "disabled"
                       : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                              ? "enabled"
                              : "?"))
               << " width=" << Hints.getWidth()
               << " unroll=" << Hints.getInterleave() << "\n");

  // Function containing the loop.
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it is
  // important to generate an optimization remark for each loop. Most of
  // these messages are generated by emitOptimizationRemarkAnalysis. Remarks
  // generated by emitOptimizationRemark and emitOptimizationRemarkMissed are
  // less verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.

  if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
    DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  // Check the loop for a trip count threshold:
  // do not vectorize loops with a tiny trip count.
  const unsigned TC = SE->getSmallConstantTripCount(L);
  if (TC > 0u && TC < TinyTripCountVectorThreshold) {
    DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                 << "This loop is not worth vectorizing.");
" 6665 << "This loop is not worth vectorizing."); 6666 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 6667 DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 6668 else { 6669 DEBUG(dbgs() << "\n"); 6670 emitAnalysisDiag(L, Hints, *ORE, VectorizationReport() 6671 << "vectorization is not beneficial " 6672 "and is not explicitly forced"); 6673 return false; 6674 } 6675 } 6676 6677 PredicatedScalarEvolution PSE(*SE, *L); 6678 6679 // Check if it is legal to vectorize the loop. 6680 LoopVectorizationRequirements Requirements(*ORE); 6681 LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE, 6682 &Requirements, &Hints); 6683 if (!LVL.canVectorize()) { 6684 DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 6685 emitMissedWarning(F, L, Hints, ORE); 6686 return false; 6687 } 6688 6689 // Use the cost model. 6690 LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F, 6691 &Hints); 6692 CM.collectValuesToIgnore(); 6693 6694 // Check the function attributes to find out if this function should be 6695 // optimized for size. 6696 bool OptForSize = 6697 Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize(); 6698 6699 // Compute the weighted frequency of this loop being executed and see if it 6700 // is less than 20% of the function entry baseline frequency. Note that we 6701 // always have a canonical loop here because we think we *can* vectorize. 6702 // FIXME: This is hidden behind a flag due to pervasive problems with 6703 // exactly what block frequency models. 6704 if (LoopVectorizeWithBlockFrequency) { 6705 BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader()); 6706 if (Hints.getForce() != LoopVectorizeHints::FK_Enabled && 6707 LoopEntryFreq < ColdEntryFreq) 6708 OptForSize = true; 6709 } 6710 6711 // Check the function attributes to see if implicit floats are allowed. 6712 // FIXME: This check doesn't seem possibly correct -- what if the loop is 6713 // an integer loop and the vector instructions selected are purely integer 6714 // vector instructions? 6715 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 6716 DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat" 6717 "attribute is used.\n"); 6718 emitAnalysisDiag( 6719 L, Hints, *ORE, 6720 VectorizationReport() 6721 << "loop not vectorized due to NoImplicitFloat attribute"); 6722 emitMissedWarning(F, L, Hints, ORE); 6723 return false; 6724 } 6725 6726 // Check if the target supports potentially unsafe FP vectorization. 6727 // FIXME: Add a check for the type of safety issue (denormal, signaling) 6728 // for the target we're vectorizing for, to make sure none of the 6729 // additional fp-math flags can help. 6730 if (Hints.isPotentiallyUnsafe() && 6731 TTI->isFPVectorizationPotentiallyUnsafe()) { 6732 DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n"); 6733 emitAnalysisDiag(L, Hints, *ORE, 6734 VectorizationReport() 6735 << "loop not vectorized due to unsafe FP support."); 6736 emitMissedWarning(F, L, Hints, ORE); 6737 return false; 6738 } 6739 6740 // Select the optimal vectorization factor. 6741 const LoopVectorizationCostModel::VectorizationFactor VF = 6742 CM.selectVectorizationFactor(OptForSize); 6743 6744 // Select the interleave count. 6745 unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost); 6746 6747 // Get user interleave count. 6748 unsigned UserIC = Hints.getInterleave(); 6749 6750 // Identify the diagnostic messages that should be produced. 

  // Identify the diagnostic messages that should be produced.
  std::string VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;

  if (Requirements.doesNotMeet(F, L, Hints)) {
    DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                    "requirements.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  if (VF.Width == 1) {
    DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg =
        "the cost-model indicates that vectorization is not beneficial";
    VectorizeLoop = false;
  }

  if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg =
        "the cost-model indicates that interleaving is not beneficial";
    InterleaveLoop = false;
    if (UserIC == 1)
      IntDiagMsg +=
          " and is explicitly disabled or interleave count is set to 1";
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    DEBUG(dbgs()
          << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = "the cost-model indicates that interleaving is beneficial "
                 "but is explicitly disabled or interleave count is set to 1";
    InterleaveLoop = false;
  }

  // Override IC if the user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emitOptimizationRemarkAnalysis(VAPassName, L, VecDiagMsg);
    ORE->emitOptimizationRemarkAnalysis(LV_NAME, L, IntDiagMsg);
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emitOptimizationRemarkAnalysis(VAPassName, L, VecDiagMsg);
  } else if (VectorizeLoop && !InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    ORE->emitOptimizationRemarkAnalysis(LV_NAME, L, IntDiagMsg);
  } else if (VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not profitable to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC);
    Unroller.vectorize(&LVL, CM.MinBWs);

    ORE->emitOptimizationRemark(LV_NAME, L,
                                Twine("interleaved loop (interleave count: ") +
                                    Twine(IC) + ")");
  } else {
    // If we decided that it is *legal* and profitable to vectorize the loop,
    // then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC);
    LB.vectorize(&LVL, CM.MinBWs);
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling of a scalar loop when there
    // are no runtime checks about strides and memory. A scalar loop that is
    // rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      AddRuntimeUnrollDisableMetaData(L);
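
    // With remarks enabled (e.g. -Rpass=loop-vectorize in clang), the user
    // would typically see something like:
    //   remark: vectorized loop (vectorization width: 4, interleave count: 2)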

    // Report the vectorization decision.
    ORE->emitOptimizationRemark(
        LV_NAME, L, Twine("vectorized loop (vectorization width: ") +
                        Twine(VF.Width) + ", interleave count: " + Twine(IC) +
                        ")");
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();

  DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;

  // Compute some weights outside of the loop over the loops. Compute this
  // using a BranchProbability to re-use its scaling math.
  const BranchProbability ColdProb(1, 5); // 20%
  ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb;

  // Don't attempt vectorization if:
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
    return false;

  // Build up a worklist of inner loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    addAcyclicInnerLoop(*L, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  bool Changed = false;
  while (!Worklist.empty())
    Changed |= processLoop(Worklist.pop_back_val());

  return Changed;
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    return LAM.getResult<LoopAccessAnalysis>(L);
  };
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, TLI, DB, AA, AC, GetLAA, ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
}
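
// Illustrative usage note: this pass can be exercised in isolation with
// either pass manager, e.g.:
//   opt -loop-vectorize -S in.ll          (legacy pass manager)
//   opt -passes=loop-vectorize -S in.ll   (new pass manager)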