//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
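//
// For example (conceptually; the actual transformation is done on LLVM IR),
// with VF = 4 a scalar loop such as
//
//   for (int i = 0; i < n; ++i)
//     A[i] = B[i] + 42;
//
// is rewritten so that each 'wide' iteration processes four elements:
//
//   for (int i = 0; i < n - 3; i += 4)
//     A[i:i+3] = B[i:i+3] + <42, 42, 42, 42>;  // one SIMD operation
//
// with a scalar epilogue loop handling the remaining iterations.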
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <map>
#include <tuple>

using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// We don't vectorize loops with a known constant trip count below this
/// number.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Don't vectorize loops with a constant "
             "trip count that is smaller than this "
             "value."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting the vectorization factor, "
             "which will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
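// Note: these cl::opt flags are hidden (omitted from -help) but can still be
// set on the command line. As a usage sketch, assuming an 'opt' build that
// includes this pass:
//
//   opt -loop-vectorize -vectorizer-min-trip-count=4 -S input.ll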
/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(false), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed, it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createMissedAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                     Instruction *I = nullptr) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

namespace {

// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

/// Returns true if the given loop body has a cycle, excluding the loop
/// itself.
static bool hasCyclesInLoopBody(const Loop &L) {
  if (!L.empty())
    return true;

  for (const auto &SCC :
       make_range(scc_iterator<Loop, LoopBodyTraits>::begin(L),
                  scc_iterator<Loop, LoopBodyTraits>::end(L))) {
    if (SCC.size() > 1) {
      DEBUG(dbgs() << "LVL: Detected a cycle in the loop body:\n");
      DEBUG(L.dump());
      return true;
    }
  }
  return false;
}
/// \brief This modifies LoopAccessReport to initialize message with
/// loop-vectorizer-specific part.
class VectorizationReport : public LoopAccessReport {
public:
  VectorizationReport(Instruction *I = nullptr)
      : LoopAccessReport("loop not vectorized: ", I) {}

  /// \brief This allows promotion of the loop-access analysis report into the
  /// loop-vectorizer report. It modifies the message to add the
  /// loop-vectorizer-specific part of the message.
  explicit VectorizationReport(const LoopAccessReport &R)
      : LoopAccessReport(Twine("loop not vectorized: ") + R.str(),
                         R.getInstr()) {}
};

/// A helper function for converting scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}

/// A helper function that returns a GEP instruction and knows to skip a
/// 'bitcast'. The 'bitcast' may be skipped if the source and the destination
/// pointee types of the 'bitcast' have the same size.
/// For example:
///   bitcast double** %var to i64* - can be skipped
///   bitcast double** %var to i8*  - cannot
static GetElementPtrInst *getGEPInstruction(Value *Ptr) {
  if (isa<GetElementPtrInst>(Ptr))
    return cast<GetElementPtrInst>(Ptr);

  if (isa<BitCastInst>(Ptr) &&
      isa<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0))) {
    Type *BitcastTy = Ptr->getType();
    Type *GEPTy = cast<BitCastInst>(Ptr)->getSrcTy();
    if (!isa<PointerType>(BitcastTy) || !isa<PointerType>(GEPTy))
      return nullptr;
    Type *Pointee1Ty = cast<PointerType>(BitcastTy)->getPointerElementType();
    Type *Pointee2Ty = cast<PointerType>(GEPTy)->getPointerElementType();
    const DataLayout &DL = cast<BitCastInst>(Ptr)->getModule()->getDataLayout();
    if (DL.getTypeSizeInBits(Pointee1Ty) == DL.getTypeSizeInBits(Pointee2Ty))
      return cast<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0));
  }
  return nullptr;
}

/// A helper function that returns the pointer operand of a load or store
/// instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
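// For example, under a typical DataLayout, i32 is regular at VF = 4: four
// i32 array elements occupy 4 * 4 bytes, exactly the store size of
// <4 x i32>. By contrast, i1 is irregular: a [4 x i1] array occupies four
// bytes (one per element), while <4 x i1> stores into a single byte.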
/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available.
/// Currently, we always assume predicated blocks have a 50% chance of
/// executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Induction(nullptr),
        OldInduction(nullptr), VectorLoopValueMap(UnrollFactor, VecWidth),
        TripCount(nullptr), VectorTripCount(nullptr), Legal(LVL), Cost(CM),
        AddedSafetyChecks(false) {}

  // Perform the actual loop widening (vectorization).
  void vectorize() {
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop();
    // Widen each instruction in the old loop to a new one in the new loop.
    vectorizeLoop();
  }

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  typedef SmallVector<Value *, 2> VectorParts;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  typedef SmallVector<SmallVector<Value *, 4>, 2> ScalarParts;
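  // For example, with UF = 2 and VF = 4, a vectorized value is held as two
  // values of type <4 x Ty> (one per unrolled copy of the loop body), while a
  // scalarized value is held as 2 x 4 scalar values, indexed by unroll part
  // and vector lane.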
  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCache;

  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop();

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Copy and widen the instructions from the old loop.
  virtual void vectorizeLoop();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes where the
  /// incoming value is 'Undef'. While vectorizing we only handled real values
  /// that were defined inside the loop. Here we fix the 'undef case'.
  /// See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Predicate conditional instructions that require predication on their
  /// respective conditions.
  void predicateInstructions();

  /// Collect the instructions from the original loop that would be trivially
  /// dead in the vectorized loop if generated.
  void collectTriviallyDeadInstructions();

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);

  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// A helper function to vectorize a single BB within the innermost loop.
  void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF,
                           PhiVector *PV);

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateInstr is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
  virtual void scalarizeInstruction(Instruction *Instr,
                                    bool IfPredicateInstr = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1,
  /// ... This is needed because each iteration in the loop corresponds to a
  /// SIMD element.
  virtual Value *getBroadcastInstrs(Value *V);
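  // For example, with VF = 4, broadcasting a loop-invariant value %x yields
  // the vector <%x, %x, %x, %x>, and adding a step vector (see getStepVector
  // below) with Step = 1 to a broadcast induction value %i yields
  // <%i, %i+1, %i+2, %i+3>, one entry per SIMD lane.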
  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
  /// can be a truncate instruction).
  void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal);

  /// Create a vector induction phi node based on an existing scalar one. This
  /// currently only works for integer induction variables with a constant
  /// step. \p EntryVal is the value from the original loop that maps to the
  /// vector phi node. If \p EntryVal is a truncate instruction, instead of
  /// widening the original IV, we widen a version of the IV truncated to \p
  /// EntryVal's type.
  void createVectorIntInductionPHI(const InductionDescriptor &II,
                                   Instruction *EntryVal);

  /// Widen an integer induction variable \p IV. If \p Trunc is provided, the
  /// induction variable will first be truncated to the corresponding type.
  void widenIntInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// Return a constant reference to the VectorParts corresponding to \p V from
  /// the original loop. If the value has already been vectorized, the
  /// corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// new vector values on-demand by inserting the scalar values into vectors
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into vectors.
  const VectorParts &getVectorValue(Value *V);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part and vector index \p Lane. If the value has
  /// been vectorized but not scalarized, the necessary extractelement
  /// instruction will be generated.
  Value *getScalarValue(Value *V, unsigned Part, unsigned Lane);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the trip count would overflow, or we
  /// wouldn't have enough iterations to execute one vector loop.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if the vector trip count is nonzero.
  void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// This is a helper class for maintaining vectorization state. It's used for
  /// mapping values from the original loop to their corresponding values in
  /// the new loop. Two mappings are maintained: one for vectorized values and
  /// one for scalarized values. Vectorized values are represented with UF
  /// vector values in the new loop, and scalarized values are represented with
  /// UF x VF scalar values in the new loop. UF and VF are the unroll and
  /// vectorization factors, respectively.
  ///
  /// Entries can be added to either map with initVector and initScalar, which
  /// initialize and return a constant reference to the new entry. If a
  /// non-constant reference to a vector entry is required, getVector can be
  /// used to retrieve a mutable entry. We currently directly modify the mapped
  /// values during "fix-up" operations that occur once the first phase of
  /// widening is complete. These operations include type truncation and the
  /// second phase of recurrence widening.
  ///
  /// Otherwise, entries from either map should be accessed using the
  /// getVectorValue or getScalarValue functions from InnerLoopVectorizer.
  /// getVectorValue and getScalarValue coordinate to generate a vector or
  /// scalar value on-demand if one is not yet available. When vectorizing a
  /// loop, we visit the definition of an instruction before its uses. When
  /// visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  struct ValueMap {

    /// Construct an empty map with the given unroll and vectorization factors.
    ValueMap(unsigned UnrollFactor, unsigned VecWidth)
        : UF(UnrollFactor), VF(VecWidth) {
      // The unroll and vectorization factors are only used in asserts builds
      // to verify map entries are sized appropriately.
      (void)UF;
      (void)VF;
    }

    /// \return True if the map has a vector entry for \p Key.
    bool hasVector(Value *Key) const { return VectorMapStorage.count(Key); }
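    // As a usage sketch (the enclosing InnerLoopVectorizer holds such a map
    // as VectorLoopValueMap; 'Widened0' and 'Widened1' are hypothetical
    // widened values):
    //
    //   VectorParts Entry(UF);                    // here UF == 2
    //   Entry[0] = Widened0;
    //   Entry[1] = Widened1;
    //   VectorLoopValueMap.initVector(V, Entry);
    //   ...
    //   const VectorParts &Parts = getVectorValue(V);  // later lookups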
    /// \return True if the map has a scalar entry for \p Key.
    bool hasScalar(Value *Key) const { return ScalarMapStorage.count(Key); }

    /// \brief Map \p Key to the given VectorParts \p Entry, and return a
    /// constant reference to the new vector map entry. The given key should
    /// not already be in the map, and the given VectorParts should be
    /// correctly sized for the current unroll factor.
    const VectorParts &initVector(Value *Key, const VectorParts &Entry) {
      assert(!hasVector(Key) && "Vector entry already initialized");
      assert(Entry.size() == UF && "VectorParts has wrong dimensions");
      VectorMapStorage[Key] = Entry;
      return VectorMapStorage[Key];
    }

    /// \brief Map \p Key to the given ScalarParts \p Entry, and return a
    /// constant reference to the new scalar map entry. The given key should
    /// not already be in the map, and the given ScalarParts should be
    /// correctly sized for the current unroll and vectorization factors.
    const ScalarParts &initScalar(Value *Key, const ScalarParts &Entry) {
      assert(!hasScalar(Key) && "Scalar entry already initialized");
      assert(Entry.size() == UF &&
             all_of(make_range(Entry.begin(), Entry.end()),
                    [&](const SmallVectorImpl<Value *> &Values) -> bool {
                      return Values.size() == VF;
                    }) &&
             "ScalarParts has wrong dimensions");
      ScalarMapStorage[Key] = Entry;
      return ScalarMapStorage[Key];
    }

    /// \return A reference to the vector map entry corresponding to \p Key.
    /// The key should already be in the map. This function should only be used
    /// when it's necessary to update values that have already been vectorized.
    /// This is the case for "fix-up" operations including type truncation and
    /// the second phase of recurrence vectorization. If a non-const reference
    /// isn't required, getVectorValue should be used instead.
    VectorParts &getVector(Value *Key) {
      assert(hasVector(Key) && "Vector entry not initialized");
      return VectorMapStorage.find(Key)->second;
    }

    /// Retrieve an entry from the vector or scalar maps. The preferred way to
    /// access an existing mapped entry is with getVectorValue or
    /// getScalarValue from InnerLoopVectorizer. Until those functions can be
    /// moved inside ValueMap, we have to declare them as friends.
    friend const VectorParts &InnerLoopVectorizer::getVectorValue(Value *V);
    friend Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part,
                                                      unsigned Lane);

  private:
    /// The unroll factor. Each entry in the vector map contains UF vector
    /// values.
    unsigned UF;

    /// The vectorization factor. Each entry in the scalar map contains UF x VF
    /// scalar values.
    unsigned VF;

    /// The vector and scalar map storage. We use std::map and not DenseMap
    /// because insertions to DenseMap invalidate its iterators.
    std::map<Value *, VectorParts> VectorMapStorage;
    std::map<Value *, ScalarParts> ScalarMapStorage;
  };

  /// The original loop.
  Loop *OrigLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;
  /// Loop Info.
  LoopInfo *LI;
  /// Dominator Tree.
  DominatorTree *DT;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Assumption Cache.
  AssumptionCache *AC;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

protected:
  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;
  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;
  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;
  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;
  /// The vector loop body.
  BasicBlock *LoopVectorBody;
  /// The scalar loop body.
  BasicBlock *LoopScalarBody;
  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;
  /// The induction variable of the old basic block.
  PHINode *OldInduction;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  ValueMap VectorLoopValueMap;

  /// Store instructions that should be predicated, as a pair
  /// <StoreInst, Predicate>.
  SmallVector<std::pair<Instruction *, Value *>, 4> PredicatedInstructions;
  EdgeMaskCache MaskCache;
  /// Trip count of the original loop.
  Value *TripCount;
  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks;

  // Holds instructions from the original loop whose counterparts in the
  // vectorized loop would be trivially dead if generated. For example,
  // original induction update instructions can become dead because we
  // separately emit induction "steps" when generating code for the new loop.
  // Similarly, we create a new latch condition when setting up the structure
  // of the new loop, so the old one can become dead.
  SmallPtrSet<Instruction *, 4> DeadInstructions;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  void scalarizeInstruction(Instruction *Instr,
                            bool IfPredicateInstr = false) override;
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

/// \brief Set the debug location in the builder using the debug location in
/// the instruction.
static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr))
    B.SetCurrentDebugLocation(Inst->getDebugLoc());
  else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To, Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///   for (unsigned i = 0; i < 1024; i += 4) {
///     a = A[i];    // Member of index 0
///     b = A[i+1];  // Member of index 1
///     d = A[i+3];  // Member of index 3
///     ...
///   }
///
/// An interleaved store group of factor 4:
///   for (unsigned i = 0; i < 1024; i += 4) {
///     ...
///     A[i]   = a;  // Member of index 0
///     A[i+1] = b;  // Member of index 1
///     A[i+2] = c;  // Member of index 2
///     A[i+3] = d;  // Member of index 3
///   }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is relative to the leader and may be
  /// negative if the new member becomes the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The largest index is always less than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }
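  // A sketch of the indexing convention: if the group leader accesses A[i+1],
  // inserting the access A[i] passes Index == -1 and makes it the new leader
  // (the new smallest key). A subsequent insertion of A[i+3] would then pass
  // Index == 3, its distance from the new leader A[i].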
  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if the group contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32          // Insert Position
  //      %add = add i32 %even     // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32           // Def of %odd
  //      store i32 %odd           // Insert Position
  Instruction *InsertPos;
};

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise it's meaningless to do analysis as the vectorization
/// on interleaved accesses is unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(nullptr),
        RequiresScalarEpilogue(false) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Return the maximum interleave factor of all interleaved groups.
  unsigned getMaxInterleaveFactor() const {
    unsigned MaxFactor = 1;
    for (auto &Entry : InterleaveGroupMap)
      MaxFactor = std::max(MaxFactor, Entry.second->getFactor());
    return MaxFactor;
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr does not belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

  /// \brief Initialize the LoopAccessInfo used for dependence checking.
  void setLAI(const LoopAccessInfo *Info) { LAI = Info; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;
  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;
  /// Holds dependences among the memory accesses in the loop. It maps a
  /// source access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    StrideDescriptor() = default;

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;
    const SCEV *Scev = nullptr; // The scalar expression of this access.
    uint64_t Size = 0;          // The size of the memory object.
    unsigned Align = 0;         // The alignment of this access.
  };

  /// \brief A type for holding instructions and their stride descriptors.
  typedef std::pair<Instruction *, StrideDescriptor> StrideEntry;

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// \brief Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride) {
    unsigned Factor = std::abs(Stride);
    return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
  }

  /// \brief Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }

  /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return false if reordering is
  /// not necessary or is prevented because \p A and \p B may be dependent.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {
    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B.
    // Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
  }

  /// \brief Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for it.
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;
  /// Vectorization interleave factor.
  Hint Interleave;
  /// Vectorization forced.
  Hint Force;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };
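  // As an illustration, the hints managed here correspond to loop metadata of
  // roughly this shape (a sketch; operand 0 refers to the loop id itself, and
  // the metadata is attached to the loop latch's branch):
  //
  //   br i1 %cond, label %loop, label %exit, !llvm.loop !0
  //   !0 = distinct !{!0, !1, !2}
  //   !1 = !{!"llvm.loop.vectorize.width", i32 8}
  //   !2 = !{!"llvm.loop.interleave.count", i32 4}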
  LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
                     OptimizationRemarkEmitter &ORE)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        PotentiallyUnsafe(false), TheLoop(L), ORE(ORE) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized() {
    Width.Value = Interleave.Value = 1;
    Hint Hints[] = {Width, Interleave};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (getWidth() == 1 && getInterleave() == 1) {
      // FIXME: Add a separate metadata to indicate when the loop has already
      // been vectorized instead of setting width and count to 1.
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      ORE.emit(OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
                                          "AllDisabled", L->getStartLoc(),
                                          L->getHeader())
               << "loop not vectorized: vectorization and interleaving are "
                  "explicitly disabled, or vectorize width and interleave "
                  "count are both set to 1");
      return false;
    }

    return true;
  }
  /// Dumps all the hint information.
  void emitRemarkWithHints() const {
    using namespace ore;
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      ORE.emit(OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
               << "loop not vectorized: vectorization is explicitly disabled");
    else {
      OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
                                 TheLoop->getStartLoc(), TheLoop->getHeader());
      R << "loop not vectorized";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=" << NV("Force", true);
        if (Width.Value != 0)
          R << ", Vector Width=" << NV("VectorWidth", Width.Value);
        if (Interleave.Value != 0)
          R << ", Interleave Count=" << NV("InterleaveCount", Interleave.Value);
        R << ")";
      }
      ORE.emit(R);
    }
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }

  /// \brief If hints are provided that force vectorization, use the AlwaysPrint
  /// pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const {
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return OptimizationRemarkAnalysis::AlwaysPrint;
  }

  bool allowReordering() const {
    // When enabling loop hints are provided, we allow the vectorizer to change
    // the order of operations that is given by the scalar loop. This is not
    // enabled by default because it can be unsafe or inefficient. For example,
    // reordering floating-point operations will change the way round-off
    // error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either an MDString or an MDNode with the first
      // operand an MDString.
1388 if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) { 1389 if (!MD || MD->getNumOperands() == 0) 1390 continue; 1391 S = dyn_cast<MDString>(MD->getOperand(0)); 1392 for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i) 1393 Args.push_back(MD->getOperand(i)); 1394 } else { 1395 S = dyn_cast<MDString>(LoopID->getOperand(i)); 1396 assert(Args.size() == 0 && "too many arguments for MDString"); 1397 } 1398 1399 if (!S) 1400 continue; 1401 1402 // Check if the hint starts with the loop metadata prefix. 1403 StringRef Name = S->getString(); 1404 if (Args.size() == 1) 1405 setHint(Name, Args[0]); 1406 } 1407 } 1408 1409 /// Checks string hint with one operand and set value if valid. 1410 void setHint(StringRef Name, Metadata *Arg) { 1411 if (!Name.startswith(Prefix())) 1412 return; 1413 Name = Name.substr(Prefix().size(), StringRef::npos); 1414 1415 const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg); 1416 if (!C) 1417 return; 1418 unsigned Val = C->getZExtValue(); 1419 1420 Hint *Hints[] = {&Width, &Interleave, &Force}; 1421 for (auto H : Hints) { 1422 if (Name == H->Name) { 1423 if (H->validate(Val)) 1424 H->Value = Val; 1425 else 1426 DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n"); 1427 break; 1428 } 1429 } 1430 } 1431 1432 /// Create a new hint from name / value pair. 1433 MDNode *createHintMetadata(StringRef Name, unsigned V) const { 1434 LLVMContext &Context = TheLoop->getHeader()->getContext(); 1435 Metadata *MDs[] = {MDString::get(Context, Name), 1436 ConstantAsMetadata::get( 1437 ConstantInt::get(Type::getInt32Ty(Context), V))}; 1438 return MDNode::get(Context, MDs); 1439 } 1440 1441 /// Matches metadata with hint name. 1442 bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) { 1443 MDString *Name = dyn_cast<MDString>(Node->getOperand(0)); 1444 if (!Name) 1445 return false; 1446 1447 for (auto H : HintTypes) 1448 if (Name->getString().endswith(H.Name)) 1449 return true; 1450 return false; 1451 } 1452 1453 /// Sets current hints into loop metadata, keeping other values intact. 1454 void writeHintsToMetadata(ArrayRef<Hint> HintTypes) { 1455 if (HintTypes.size() == 0) 1456 return; 1457 1458 // Reserve the first element to LoopID (see below). 1459 SmallVector<Metadata *, 4> MDs(1); 1460 // If the loop already has metadata, then ignore the existing operands. 1461 MDNode *LoopID = TheLoop->getLoopID(); 1462 if (LoopID) { 1463 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 1464 MDNode *Node = cast<MDNode>(LoopID->getOperand(i)); 1465 // If node in update list, ignore old value. 1466 if (!matchesHintMetadataName(Node, HintTypes)) 1467 MDs.push_back(Node); 1468 } 1469 } 1470 1471 // Now, add the missing hints. 1472 for (auto H : HintTypes) 1473 MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value)); 1474 1475 // Replace current metadata node with new one. 1476 LLVMContext &Context = TheLoop->getHeader()->getContext(); 1477 MDNode *NewLoopID = MDNode::get(Context, MDs); 1478 // Set operand 0 to refer to the loop id itself. 1479 NewLoopID->replaceOperandWith(0, NewLoopID); 1480 1481 TheLoop->setLoopID(NewLoopID); 1482 } 1483 1484 /// The loop these hints belong to. 1485 const Loop *TheLoop; 1486 1487 /// Interface to emit optimization remarks. 
  OptimizationRemarkEmitter &ORE;
};

static void emitAnalysisDiag(const Loop *TheLoop,
                             const LoopVectorizeHints &Hints,
                             OptimizationRemarkEmitter &ORE,
                             const LoopAccessReport &Message) {
  const char *Name = Hints.vectorizeAnalysisPassName();
  LoopAccessReport::emitAnalysis(Message, TheLoop, Name, ORE);
}

static void emitMissedWarning(Function *F, Loop *L,
                              const LoopVectorizeHints &LH,
                              OptimizationRemarkEmitter *ORE) {
  LH.emitRemarkWithHints();

  if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
    if (LH.getWidth() != 1)
      emitLoopVectorizeWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop vectorization");
    else if (LH.getInterleave() != 1)
      emitLoopInterleaveWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop interleaving");
  }
}

/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
/// to what vectorization factor.
/// This class does not look at the profitability of vectorization, only the
/// legality. This class has two main kinds of checks:
/// * Memory checks - The code in canVectorizeMemory checks if vectorization
///   will change the order of memory accesses in a way that will change the
///   correctness of the program.
/// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
///   checks for a number of different conditions, such as the availability of
///   a single induction variable, whether all types are supported and
///   vectorizable, etc. This code reflects the capabilities of
///   InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction variable and the different reduction variables.
class LoopVectorizationLegality {
public:
  LoopVectorizationLegality(
      Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
      TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F,
      const TargetTransformInfo *TTI,
      std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI,
      OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R,
      LoopVectorizeHints *H)
      : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT),
        GetLAA(GetLAA), LAI(nullptr), ORE(ORE), InterleaveInfo(PSE, L, DT, LI),
        Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
        Requirements(R), Hints(H) {}

  /// ReductionList contains the reduction descriptors for all
  /// of the reductions that were found in the loop.
  typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;

  /// InductionList saves induction variables and maps them to the
  /// induction descriptor.
  typedef MapVector<PHINode *, InductionDescriptor> InductionList;

  /// RecurrenceSet contains the phi nodes that are recurrences other than
  /// inductions and reductions.
  typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet;

  /// Returns true if it is legal to vectorize this loop.
  /// This does not mean that it is profitable to vectorize this
  /// loop, only that it is legal to do so.
  bool canVectorize();

  /// Returns the induction variable.
  PHINode *getInduction() { return Induction; }

  /// Returns the reduction variables found in the loop.
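  /// For example, the phi behind an update such as "sum += A[i]" is a
  /// reduction; the returned list maps each such phi to its recurrence
  /// descriptor.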
1564 ReductionList *getReductionVars() { return &Reductions; } 1565 1566 /// Returns the induction variables found in the loop. 1567 InductionList *getInductionVars() { return &Inductions; } 1568 1569 /// Return the first-order recurrences found in the loop. 1570 RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; } 1571 1572 /// Returns the widest induction type. 1573 Type *getWidestInductionType() { return WidestIndTy; } 1574 1575 /// Returns True if V is an induction variable in this loop. 1576 bool isInductionVariable(const Value *V); 1577 1578 /// Returns True if PN is a reduction variable in this loop. 1579 bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); } 1580 1581 /// Returns True if Phi is a first-order recurrence in this loop. 1582 bool isFirstOrderRecurrence(const PHINode *Phi); 1583 1584 /// Return true if the block BB needs to be predicated in order for the loop 1585 /// to be vectorized. 1586 bool blockNeedsPredication(BasicBlock *BB); 1587 1588 /// Check if this pointer is consecutive when vectorizing. This happens 1589 /// when the last index of the GEP is the induction variable, or that the 1590 /// pointer itself is an induction variable. 1591 /// This check allows us to vectorize A[idx] into a wide load/store. 1592 /// Returns: 1593 /// 0 - Stride is unknown or non-consecutive. 1594 /// 1 - Address is consecutive. 1595 /// -1 - Address is consecutive, and decreasing. 1596 int isConsecutivePtr(Value *Ptr); 1597 1598 /// Returns true if the value V is uniform within the loop. 1599 bool isUniform(Value *V); 1600 1601 /// Returns true if \p I is known to be uniform after vectorization. 1602 bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); } 1603 1604 /// Returns true if \p I is known to be scalar after vectorization. 1605 bool isScalarAfterVectorization(Instruction *I) { return Scalars.count(I); } 1606 1607 /// Returns the information that we collected about runtime memory check. 1608 const RuntimePointerChecking *getRuntimePointerChecking() const { 1609 return LAI->getRuntimePointerChecking(); 1610 } 1611 1612 const LoopAccessInfo *getLAI() const { return LAI; } 1613 1614 /// \brief Check if \p Instr belongs to any interleaved access group. 1615 bool isAccessInterleaved(Instruction *Instr) { 1616 return InterleaveInfo.isInterleaved(Instr); 1617 } 1618 1619 /// \brief Return the maximum interleave factor of all interleaved groups. 1620 unsigned getMaxInterleaveFactor() const { 1621 return InterleaveInfo.getMaxInterleaveFactor(); 1622 } 1623 1624 /// \brief Get the interleaved access group that \p Instr belongs to. 1625 const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) { 1626 return InterleaveInfo.getInterleaveGroup(Instr); 1627 } 1628 1629 /// \brief Returns true if an interleaved group requires a scalar iteration 1630 /// to handle accesses with gaps. 1631 bool requiresScalarEpilogue() const { 1632 return InterleaveInfo.requiresScalarEpilogue(); 1633 } 1634 1635 unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); } 1636 1637 bool hasStride(Value *V) { return LAI->hasStride(V); } 1638 1639 /// Returns true if the target machine supports masked store operation 1640 /// for the given \p DataType and kind of access to \p Ptr. 
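  /// For example, a store executed under a condition may be emitted as a call
  /// to the llvm.masked.store intrinsic, but only when the pointer is
  /// consecutive and the target reports support for the data type.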
  bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
  }
  /// Returns true if the target machine supports masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
  }
  /// Returns true if the target machine supports masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType) {
    return TTI->isLegalMaskedScatter(DataType);
  }
  /// Returns true if the target machine supports masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType) {
    return TTI->isLegalMaskedGather(DataType);
  }
  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    auto *LI = dyn_cast<LoadInst>(V);
    auto *SI = dyn_cast<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ptr = getPointerOperand(V);
    auto *Ty = cast<PointerType>(Ptr->getType())->getElementType();
    return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
  }

  /// Returns true if the vector representation of the instruction \p I
  /// requires a mask.
  bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
  unsigned getNumStores() const { return LAI->getNumStores(); }
  unsigned getNumLoads() const { return LAI->getNumLoads(); }
  unsigned getNumPredStores() const { return NumPredStores; }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  bool isScalarWithPredication(Instruction *I);

  /// Returns true if \p I is a memory instruction that has a consecutive or
  /// consecutive-like pointer operand. Consecutive-like pointers are pointers
  /// that are treated like consecutive pointers during vectorization. The
  /// pointer operands of interleaved accesses are an example.
  bool hasConsecutiveLikePtrOperand(Instruction *I);

  /// Returns true if \p I is a memory instruction that must be scalarized
  /// during vectorization.
  bool memoryInstructionMustBeScalarized(Instruction *I, unsigned VF = 1);

private:
  /// Check if a single basic block loop is vectorizable.
  /// At this point we know that this is a loop with a constant trip count
  /// and we only need to check individual instructions.
  bool canVectorizeInstrs();

  /// When we vectorize loops we may change the order in which
  /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constraints.
  /// Returns true if the loop is vectorizable.
  bool canVectorizeMemory();

  /// Return true if we can vectorize this loop using the IF-conversion
  /// transformation.
  bool canVectorizeWithIfConvert();

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses.
Note that although uniformity implies an 1714 /// instruction will be scalar, the reverse is not true. In general, a 1715 /// scalarized instruction will be represented by VF scalar values in the 1716 /// vectorized loop, each corresponding to an iteration of the original 1717 /// scalar loop. 1718 void collectLoopUniforms(); 1719 1720 /// Collect the instructions that are scalar after vectorization. An 1721 /// instruction is scalar if it is known to be uniform or will be scalarized 1722 /// during vectorization. Non-uniform scalarized instructions will be 1723 /// represented by VF values in the vectorized loop, each corresponding to an 1724 /// iteration of the original scalar loop. 1725 void collectLoopScalars(); 1726 1727 /// Return true if all of the instructions in the block can be speculatively 1728 /// executed. \p SafePtrs is a list of addresses that are known to be legal 1729 /// and we know that we can read from them without segfault. 1730 bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs); 1731 1732 /// Updates the vectorization state by adding \p Phi to the inductions list. 1733 /// This can set \p Phi as the main induction of the loop if \p Phi is a 1734 /// better choice for the main induction than the existing one. 1735 void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID, 1736 SmallPtrSetImpl<Value *> &AllowedExit); 1737 1738 /// Report an analysis message to assist the user in diagnosing loops that are 1739 /// not vectorized. These are handled as LoopAccessReport rather than 1740 /// VectorizationReport because the << operator of VectorizationReport returns 1741 /// LoopAccessReport. 1742 void emitAnalysis(const LoopAccessReport &Message) const { 1743 emitAnalysisDiag(TheLoop, *Hints, *ORE, Message); 1744 } 1745 1746 /// Create an analysis remark that explains why vectorization failed 1747 /// 1748 /// \p RemarkName is the identifier for the remark. If \p I is passed it is 1749 /// an instruction that prevents vectorization. Otherwise the loop is used 1750 /// for the location of the remark. \return the remark object that can be 1751 /// streamed to. 1752 OptimizationRemarkAnalysis 1753 createMissedAnalysis(StringRef RemarkName, Instruction *I = nullptr) const { 1754 return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(), 1755 RemarkName, TheLoop, I); 1756 } 1757 1758 /// \brief If an access has a symbolic strides, this maps the pointer value to 1759 /// the stride symbol. 1760 const ValueToValueMap *getSymbolicStrides() { 1761 // FIXME: Currently, the set of symbolic strides is sometimes queried before 1762 // it's collected. This happens from canVectorizeWithIfConvert, when the 1763 // pointer is checked to reference consecutive elements suitable for a 1764 // masked access. 1765 return LAI ? &LAI->getSymbolicStrides() : nullptr; 1766 } 1767 1768 unsigned NumPredStores; 1769 1770 /// The loop that we evaluate. 1771 Loop *TheLoop; 1772 /// A wrapper around ScalarEvolution used to add runtime SCEV checks. 1773 /// Applies dynamic knowledge to simplify SCEV expressions in the context 1774 /// of existing SCEV assumptions. The analysis will also add a minimal set 1775 /// of new predicates if this is required to enable vectorization and 1776 /// unrolling. 1777 PredicatedScalarEvolution &PSE; 1778 /// Target Library Info. 1779 TargetLibraryInfo *TLI; 1780 /// Target Transform Info 1781 const TargetTransformInfo *TTI; 1782 /// Dominator Tree. 1783 DominatorTree *DT; 1784 // LoopAccess analysis. 
1785 std::function<const LoopAccessInfo &(Loop &)> *GetLAA; 1786 // And the loop-accesses info corresponding to this loop. This pointer is 1787 // null until canVectorizeMemory sets it up. 1788 const LoopAccessInfo *LAI; 1789 /// Interface to emit optimization remarks. 1790 OptimizationRemarkEmitter *ORE; 1791 1792 /// The interleave access information contains groups of interleaved accesses 1793 /// with the same stride and close to each other. 1794 InterleavedAccessInfo InterleaveInfo; 1795 1796 // --- vectorization state --- // 1797 1798 /// Holds the integer induction variable. This is the counter of the 1799 /// loop. 1800 PHINode *Induction; 1801 /// Holds the reduction variables. 1802 ReductionList Reductions; 1803 /// Holds all of the induction variables that we found in the loop. 1804 /// Notice that inductions don't need to start at zero and that induction 1805 /// variables can be pointers. 1806 InductionList Inductions; 1807 /// Holds the phi nodes that are first-order recurrences. 1808 RecurrenceSet FirstOrderRecurrences; 1809 /// Holds the widest induction type encountered. 1810 Type *WidestIndTy; 1811 1812 /// Allowed outside users. This holds the induction and reduction 1813 /// vars which can be accessed from outside the loop. 1814 SmallPtrSet<Value *, 4> AllowedExit; 1815 1816 /// Holds the instructions known to be uniform after vectorization. 1817 SmallPtrSet<Instruction *, 4> Uniforms; 1818 1819 /// Holds the instructions known to be scalar after vectorization. 1820 SmallPtrSet<Instruction *, 4> Scalars; 1821 1822 /// Can we assume the absence of NaNs. 1823 bool HasFunNoNaNAttr; 1824 1825 /// Vectorization requirements that will go through late-evaluation. 1826 LoopVectorizationRequirements *Requirements; 1827 1828 /// Used to emit an analysis of any legality issues. 1829 LoopVectorizeHints *Hints; 1830 1831 /// While vectorizing these instructions we have to generate a 1832 /// call to the appropriate masked intrinsic 1833 SmallPtrSet<const Instruction *, 8> MaskedOp; 1834 }; 1835 1836 /// LoopVectorizationCostModel - estimates the expected speedups due to 1837 /// vectorization. 1838 /// In many cases vectorization is not profitable. This can happen because of 1839 /// a number of reasons. In this class we mainly attempt to predict the 1840 /// expected speedup/slowdowns due to the supported instruction set. We use the 1841 /// TargetTransformInfo to query the different backends for the cost of 1842 /// different operations. 1843 class LoopVectorizationCostModel { 1844 public: 1845 LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE, 1846 LoopInfo *LI, LoopVectorizationLegality *Legal, 1847 const TargetTransformInfo &TTI, 1848 const TargetLibraryInfo *TLI, DemandedBits *DB, 1849 AssumptionCache *AC, 1850 OptimizationRemarkEmitter *ORE, const Function *F, 1851 const LoopVectorizeHints *Hints) 1852 : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB), 1853 AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {} 1854 1855 /// Information about vectorization costs 1856 struct VectorizationFactor { 1857 unsigned Width; // Vector width with best cost 1858 unsigned Cost; // Cost of the loop with that width 1859 }; 1860 /// \return The most profitable vectorization factor and the cost of that VF. 1861 /// This method checks every power of two up to VF. If UserVF is not ZERO 1862 /// then this vectorization factor will be selected if vectorization is 1863 /// possible. 
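  /// For example, if the widest type in the loop permits a maximum VF of 8,
  /// the costs of VF = 1, 2, 4, and 8 are compared, and the width with the
  /// lowest cost per lane wins.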
  VectorizationFactor selectVectorizationFactor(bool OptForSize);

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
                                 unsigned LoopCost);

  /// \return The most profitable unroll factor.
  /// This method finds the best unroll factor based on register pressure and
  /// other parameters. VF and LoopCost are the selected vectorization factor
  /// and the cost of the selected VF.
  unsigned computeInterleaveCount(bool OptForSize, unsigned VF,
                                  unsigned LoopCost);

  /// \brief A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    unsigned LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    unsigned MaxLocalUsers;
    /// Holds the number of instructions in the loop.
    unsigned NumInstructions;
  };

  /// \return Returns information about the register usages of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

private:
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  typedef std::pair<unsigned, bool> VectorizationCostTy;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
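  /// For example, a load of A[i] in a loop over i accesses consecutive memory
  /// and can become a single wide load, while a load of A[2 * i] is strided
  /// and cannot.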
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Create an analysis remark that explains why vectorization failed.
  ///
  /// \p RemarkName is the identifier for the remark. \return the remark object
  /// that can be streamed to.
  OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
    return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
                                  RemarkName, TheLoop);
  }

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be truncated
  /// to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

public:
  /// The loop that we evaluate.
  Loop *TheLoop;
  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;
  /// Loop Info analysis.
  LoopInfo *LI;
  /// Vectorization legality.
  LoopVectorizationLegality *Legal;
  /// Vector target information.
  const TargetTransformInfo &TTI;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Demanded bits analysis.
  DemandedBits *DB;
  /// Assumption cache.
  AssumptionCache *AC;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  const Function *TheFunction;
  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;
  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;
  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;
};

/// \brief This holds vectorization requirements that must be verified late in
/// the process. The requirements are set by legalize and costmodel. Once
/// vectorization has been determined to be possible and profitable the
/// requirements can be verified by looking for metadata or compiler options.
/// For example, some loops require FP commutativity which is only allowed if
/// vectorization is explicitly specified or if the fast-math compiler option
/// has been provided.
/// Late evaluation of these requirements allows helpful diagnostics to be
/// composed that tell the user what needs to be done to vectorize the loop.
/// For example, by specifying #pragma clang loop vectorize or -ffast-math.
/// Late evaluation should be used only when diagnostics can be generated that
/// can be followed by a non-expert user.
class LoopVectorizationRequirements {
public:
  LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE)
      : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr), ORE(ORE) {}

  void addUnsafeAlgebraInst(Instruction *I) {
    // First unsafe algebra instruction.
    if (!UnsafeAlgebraInst)
      UnsafeAlgebraInst = I;
  }

  void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }

  bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
    const char *PassName = Hints.vectorizeAnalysisPassName();
    bool Failed = false;
    if (UnsafeAlgebraInst && !Hints.allowReordering()) {
      ORE.emit(
          OptimizationRemarkAnalysisFPCommute(PassName, "CantReorderFPOps",
                                              UnsafeAlgebraInst->getDebugLoc(),
                                              UnsafeAlgebraInst->getParent())
          << "loop not vectorized: cannot prove it is safe to reorder "
             "floating-point operations");
      Failed = true;
    }

    // Test if runtime memcheck thresholds are exceeded.
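    // For example, assuming the default thresholds of 8 (without a pragma)
    // and 128 (with one), a loop that needs 20 runtime pointer checks is
    // rejected unless vectorization was explicitly requested.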
2020 bool PragmaThresholdReached = 2021 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 2022 bool ThresholdReached = 2023 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 2024 if ((ThresholdReached && !Hints.allowReordering()) || 2025 PragmaThresholdReached) { 2026 ORE.emit(OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps", 2027 L->getStartLoc(), 2028 L->getHeader()) 2029 << "loop not vectorized: cannot prove it is safe to reorder " 2030 "memory operations"); 2031 DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 2032 Failed = true; 2033 } 2034 2035 return Failed; 2036 } 2037 2038 private: 2039 unsigned NumRuntimePointerChecks; 2040 Instruction *UnsafeAlgebraInst; 2041 2042 /// Interface to emit optimization remarks. 2043 OptimizationRemarkEmitter &ORE; 2044 }; 2045 2046 static void addAcyclicInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) { 2047 if (L.empty()) { 2048 if (!hasCyclesInLoopBody(L)) 2049 V.push_back(&L); 2050 return; 2051 } 2052 for (Loop *InnerL : L) 2053 addAcyclicInnerLoop(*InnerL, V); 2054 } 2055 2056 /// The LoopVectorize Pass. 2057 struct LoopVectorize : public FunctionPass { 2058 /// Pass identification, replacement for typeid 2059 static char ID; 2060 2061 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) 2062 : FunctionPass(ID) { 2063 Impl.DisableUnrolling = NoUnrolling; 2064 Impl.AlwaysVectorize = AlwaysVectorize; 2065 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2066 } 2067 2068 LoopVectorizePass Impl; 2069 2070 bool runOnFunction(Function &F) override { 2071 if (skipFunction(F)) 2072 return false; 2073 2074 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2075 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2076 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2077 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2078 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2079 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2080 auto *TLI = TLIP ? 
&TLIP->getTLI() : nullptr; 2081 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2082 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2083 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2084 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2085 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2086 2087 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2088 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2089 2090 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2091 GetLAA, *ORE); 2092 } 2093 2094 void getAnalysisUsage(AnalysisUsage &AU) const override { 2095 AU.addRequired<AssumptionCacheTracker>(); 2096 AU.addRequiredID(LoopSimplifyID); 2097 AU.addRequiredID(LCSSAID); 2098 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2099 AU.addRequired<DominatorTreeWrapperPass>(); 2100 AU.addRequired<LoopInfoWrapperPass>(); 2101 AU.addRequired<ScalarEvolutionWrapperPass>(); 2102 AU.addRequired<TargetTransformInfoWrapperPass>(); 2103 AU.addRequired<AAResultsWrapperPass>(); 2104 AU.addRequired<LoopAccessLegacyAnalysis>(); 2105 AU.addRequired<DemandedBitsWrapperPass>(); 2106 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2107 AU.addPreserved<LoopInfoWrapperPass>(); 2108 AU.addPreserved<DominatorTreeWrapperPass>(); 2109 AU.addPreserved<BasicAAWrapperPass>(); 2110 AU.addPreserved<GlobalsAAWrapperPass>(); 2111 } 2112 }; 2113 2114 } // end anonymous namespace 2115 2116 //===----------------------------------------------------------------------===// 2117 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2118 // LoopVectorizationCostModel. 2119 //===----------------------------------------------------------------------===// 2120 2121 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2122 // We need to place the broadcast of invariant variables outside the loop. 2123 Instruction *Instr = dyn_cast<Instruction>(V); 2124 bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody); 2125 bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr; 2126 2127 // Place the code for broadcasting invariant variables in the new preheader. 2128 IRBuilder<>::InsertPointGuard Guard(Builder); 2129 if (Invariant) 2130 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2131 2132 // Broadcast the scalar into all locations in the vector. 
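  // For a 32-bit value with VF = 4, CreateVectorSplat below expands to an
  // insertelement into lane 0 followed by a shufflevector with an all-zeros
  // mask, roughly:
  //   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %v, i32 0
  //   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
  //                          <4 x i32> undef, <4 x i32> zeroinitializer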
2133 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2134 2135 return Shuf; 2136 } 2137 2138 void InnerLoopVectorizer::createVectorIntInductionPHI( 2139 const InductionDescriptor &II, Instruction *EntryVal) { 2140 Value *Start = II.getStartValue(); 2141 ConstantInt *Step = II.getConstIntStepValue(); 2142 assert(Step && "Can not widen an IV with a non-constant step"); 2143 2144 // Construct the initial value of the vector IV in the vector loop preheader 2145 auto CurrIP = Builder.saveIP(); 2146 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2147 if (isa<TruncInst>(EntryVal)) { 2148 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2149 Step = ConstantInt::getSigned(TruncType, Step->getSExtValue()); 2150 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2151 } 2152 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2153 Value *SteppedStart = getStepVector(SplatStart, 0, Step); 2154 Builder.restoreIP(CurrIP); 2155 2156 Value *SplatVF = 2157 ConstantVector::getSplat(VF, ConstantInt::getSigned(Start->getType(), 2158 VF * Step->getSExtValue())); 2159 // We may need to add the step a number of times, depending on the unroll 2160 // factor. The last of those goes into the PHI. 2161 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2162 &*LoopVectorBody->getFirstInsertionPt()); 2163 Instruction *LastInduction = VecInd; 2164 VectorParts Entry(UF); 2165 for (unsigned Part = 0; Part < UF; ++Part) { 2166 Entry[Part] = LastInduction; 2167 LastInduction = cast<Instruction>( 2168 Builder.CreateAdd(LastInduction, SplatVF, "step.add")); 2169 } 2170 VectorLoopValueMap.initVector(EntryVal, Entry); 2171 if (isa<TruncInst>(EntryVal)) 2172 addMetadata(Entry, EntryVal); 2173 2174 // Move the last step to the end of the latch block. This ensures consistent 2175 // placement of all induction updates. 2176 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2177 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2178 auto *ICmp = cast<Instruction>(Br->getCondition()); 2179 LastInduction->moveBefore(ICmp); 2180 LastInduction->setName("vec.ind.next"); 2181 2182 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2183 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2184 } 2185 2186 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2187 if (Legal->isScalarAfterVectorization(IV)) 2188 return true; 2189 auto isScalarInst = [&](User *U) -> bool { 2190 auto *I = cast<Instruction>(U); 2191 return (OrigLoop->contains(I) && Legal->isScalarAfterVectorization(I)); 2192 }; 2193 return any_of(IV->users(), isScalarInst); 2194 } 2195 2196 void InnerLoopVectorizer::widenIntInduction(PHINode *IV, TruncInst *Trunc) { 2197 2198 auto II = Legal->getInductionVars()->find(IV); 2199 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 2200 2201 auto ID = II->second; 2202 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2203 2204 // The scalar value to broadcast. This will be derived from the canonical 2205 // induction variable. 2206 Value *ScalarIV = nullptr; 2207 2208 // The step of the induction. 2209 Value *Step = nullptr; 2210 2211 // The value from the original loop to which we are mapping the new induction 2212 // variable. 2213 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2214 2215 // True if we have vectorized the induction variable. 
2216 auto VectorizedIV = false; 2217 2218 // Determine if we want a scalar version of the induction variable. This is 2219 // true if the induction variable itself is not widened, or if it has at 2220 // least one user in the loop that is not widened. 2221 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 2222 2223 // If the induction variable has a constant integer step value, go ahead and 2224 // get it now. 2225 if (ID.getConstIntStepValue()) 2226 Step = ID.getConstIntStepValue(); 2227 2228 // Try to create a new independent vector induction variable. If we can't 2229 // create the phi node, we will splat the scalar induction variable in each 2230 // loop iteration. 2231 if (VF > 1 && IV->getType() == Induction->getType() && Step && 2232 !Legal->isScalarAfterVectorization(EntryVal)) { 2233 createVectorIntInductionPHI(ID, EntryVal); 2234 VectorizedIV = true; 2235 } 2236 2237 // If we haven't yet vectorized the induction variable, or if we will create 2238 // a scalar one, we need to define the scalar induction variable and step 2239 // values. If we were given a truncation type, truncate the canonical 2240 // induction variable and constant step. Otherwise, derive these values from 2241 // the induction descriptor. 2242 if (!VectorizedIV || NeedsScalarIV) { 2243 if (Trunc) { 2244 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2245 assert(Step && "Truncation requires constant integer step"); 2246 auto StepInt = cast<ConstantInt>(Step)->getSExtValue(); 2247 ScalarIV = Builder.CreateCast(Instruction::Trunc, Induction, TruncType); 2248 Step = ConstantInt::getSigned(TruncType, StepInt); 2249 } else { 2250 ScalarIV = Induction; 2251 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2252 if (IV != OldInduction) { 2253 ScalarIV = Builder.CreateSExtOrTrunc(ScalarIV, IV->getType()); 2254 ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL); 2255 ScalarIV->setName("offset.idx"); 2256 } 2257 if (!Step) { 2258 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2259 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 2260 &*Builder.GetInsertPoint()); 2261 } 2262 } 2263 } 2264 2265 // If we haven't yet vectorized the induction variable, splat the scalar 2266 // induction variable, and build the necessary step vectors. 2267 if (!VectorizedIV) { 2268 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2269 VectorParts Entry(UF); 2270 for (unsigned Part = 0; Part < UF; ++Part) 2271 Entry[Part] = getStepVector(Broadcasted, VF * Part, Step); 2272 VectorLoopValueMap.initVector(EntryVal, Entry); 2273 if (Trunc) 2274 addMetadata(Entry, Trunc); 2275 } 2276 2277 // If an induction variable is only used for counting loop iterations or 2278 // calculating addresses, it doesn't need to be widened. Create scalar steps 2279 // that can be used by instructions we will later scalarize. Note that the 2280 // addition of the scalar steps will not increase the number of instructions 2281 // in the loop in the common case prior to InstCombine. We will be trading 2282 // one vector extract for each scalar step. 2283 if (NeedsScalarIV) 2284 buildScalarSteps(ScalarIV, Step, EntryVal); 2285 } 2286 2287 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2288 Instruction::BinaryOps BinOp) { 2289 // Create and check the types. 
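  // For example, for an integer induction with VLen = 4, StartIdx = 0, and
  // Step = 2, the code below produces Val + <0, 2, 4, 6>.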
2290 assert(Val->getType()->isVectorTy() && "Must be a vector"); 2291 int VLen = Val->getType()->getVectorNumElements(); 2292 2293 Type *STy = Val->getType()->getScalarType(); 2294 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2295 "Induction Step must be an integer or FP"); 2296 assert(Step->getType() == STy && "Step has wrong type"); 2297 2298 SmallVector<Constant *, 8> Indices; 2299 2300 if (STy->isIntegerTy()) { 2301 // Create a vector of consecutive numbers from zero to VF. 2302 for (int i = 0; i < VLen; ++i) 2303 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 2304 2305 // Add the consecutive indices to the vector value. 2306 Constant *Cv = ConstantVector::get(Indices); 2307 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 2308 Step = Builder.CreateVectorSplat(VLen, Step); 2309 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2310 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2311 // which can be found from the original scalar operations. 2312 Step = Builder.CreateMul(Cv, Step); 2313 return Builder.CreateAdd(Val, Step, "induction"); 2314 } 2315 2316 // Floating point induction. 2317 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2318 "Binary Opcode should be specified for FP induction"); 2319 // Create a vector of consecutive numbers from zero to VF. 2320 for (int i = 0; i < VLen; ++i) 2321 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2322 2323 // Add the consecutive indices to the vector value. 2324 Constant *Cv = ConstantVector::get(Indices); 2325 2326 Step = Builder.CreateVectorSplat(VLen, Step); 2327 2328 // Floating point operations had to be 'fast' to enable the induction. 2329 FastMathFlags Flags; 2330 Flags.setUnsafeAlgebra(); 2331 2332 Value *MulOp = Builder.CreateFMul(Cv, Step); 2333 if (isa<Instruction>(MulOp)) 2334 // Have to check, MulOp may be a constant 2335 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2336 2337 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2338 if (isa<Instruction>(BOp)) 2339 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2340 return BOp; 2341 } 2342 2343 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2344 Value *EntryVal) { 2345 2346 // We shouldn't have to build scalar steps if we aren't vectorizing. 2347 assert(VF > 1 && "VF should be greater than one"); 2348 2349 // Get the value type and ensure it and the step have the same integer type. 2350 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2351 assert(ScalarIVTy->isIntegerTy() && ScalarIVTy == Step->getType() && 2352 "Val and Step should have the same integer type"); 2353 2354 // Determine the number of scalars we need to generate for each unroll 2355 // iteration. If EntryVal is uniform, we only need to generate the first 2356 // lane. Otherwise, we generate all VF values. 2357 unsigned Lanes = 2358 Legal->isUniformAfterVectorization(cast<Instruction>(EntryVal)) ? 1 : VF; 2359 2360 // Compute the scalar steps and save the results in VectorLoopValueMap. 
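  // For example, with UF = 2 and VF = 4, a non-uniform EntryVal gets the
  // scalar steps ScalarIV + {0,1,2,3} * Step in part 0 and
  // ScalarIV + {4,5,6,7} * Step in part 1; a uniform EntryVal only needs
  // lane 0 of each part.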
2361 ScalarParts Entry(UF); 2362 for (unsigned Part = 0; Part < UF; ++Part) { 2363 Entry[Part].resize(VF); 2364 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2365 auto *StartIdx = ConstantInt::get(ScalarIVTy, VF * Part + Lane); 2366 auto *Mul = Builder.CreateMul(StartIdx, Step); 2367 auto *Add = Builder.CreateAdd(ScalarIV, Mul); 2368 Entry[Part][Lane] = Add; 2369 } 2370 } 2371 VectorLoopValueMap.initScalar(EntryVal, Entry); 2372 } 2373 2374 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { 2375 2376 const ValueToValueMap &Strides = getSymbolicStrides() ? *getSymbolicStrides() : 2377 ValueToValueMap(); 2378 2379 int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, true, false); 2380 if (Stride == 1 || Stride == -1) 2381 return Stride; 2382 return 0; 2383 } 2384 2385 bool LoopVectorizationLegality::isUniform(Value *V) { 2386 return LAI->isUniform(V); 2387 } 2388 2389 const InnerLoopVectorizer::VectorParts & 2390 InnerLoopVectorizer::getVectorValue(Value *V) { 2391 assert(V != Induction && "The new induction variable should not be used."); 2392 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2393 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2394 2395 // If we have a stride that is replaced by one, do it here. 2396 if (Legal->hasStride(V)) 2397 V = ConstantInt::get(V->getType(), 1); 2398 2399 // If we have this scalar in the map, return it. 2400 if (VectorLoopValueMap.hasVector(V)) 2401 return VectorLoopValueMap.VectorMapStorage[V]; 2402 2403 // If the value has not been vectorized, check if it has been scalarized 2404 // instead. If it has been scalarized, and we actually need the value in 2405 // vector form, we will construct the vector values on demand. 2406 if (VectorLoopValueMap.hasScalar(V)) { 2407 2408 // Initialize a new vector map entry. 2409 VectorParts Entry(UF); 2410 2411 // If we've scalarized a value, that value should be an instruction. 2412 auto *I = cast<Instruction>(V); 2413 2414 // If we aren't vectorizing, we can just copy the scalar map values over to 2415 // the vector map. 2416 if (VF == 1) { 2417 for (unsigned Part = 0; Part < UF; ++Part) 2418 Entry[Part] = getScalarValue(V, Part, 0); 2419 return VectorLoopValueMap.initVector(V, Entry); 2420 } 2421 2422 // Get the last scalar instruction we generated for V. If the value is 2423 // known to be uniform after vectorization, this corresponds to lane zero 2424 // of the last unroll iteration. Otherwise, the last instruction is the one 2425 // we created for the last vector lane of the last unroll iteration. 2426 unsigned LastLane = Legal->isUniformAfterVectorization(I) ? 0 : VF - 1; 2427 auto *LastInst = cast<Instruction>(getScalarValue(V, UF - 1, LastLane)); 2428 2429 // Set the insert point after the last scalarized instruction. This ensures 2430 // the insertelement sequence will directly follow the scalar definitions. 2431 auto OldIP = Builder.saveIP(); 2432 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 2433 Builder.SetInsertPoint(&*NewIP); 2434 2435 // However, if we are vectorizing, we need to construct the vector values. 2436 // If the value is known to be uniform after vectorization, we can just 2437 // broadcast the scalar value corresponding to lane zero for each unroll 2438 // iteration. Otherwise, we construct the vector values using insertelement 2439 // instructions. Since the resulting vectors are stored in 2440 // VectorLoopValueMap, we will only generate the insertelements once. 
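    // For example, for a scalarized 32-bit value with VF = 2, each part is
    // roughly rebuilt as (value names illustrative):
    //   %v0 = insertelement <2 x i32> undef, i32 %s.lane0, i32 0
    //   %v1 = insertelement <2 x i32> %v0, i32 %s.lane1, i32 1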
2441 for (unsigned Part = 0; Part < UF; ++Part) { 2442 Value *VectorValue = nullptr; 2443 if (Legal->isUniformAfterVectorization(I)) { 2444 VectorValue = getBroadcastInstrs(getScalarValue(V, Part, 0)); 2445 } else { 2446 VectorValue = UndefValue::get(VectorType::get(V->getType(), VF)); 2447 for (unsigned Lane = 0; Lane < VF; ++Lane) 2448 VectorValue = Builder.CreateInsertElement( 2449 VectorValue, getScalarValue(V, Part, Lane), 2450 Builder.getInt32(Lane)); 2451 } 2452 Entry[Part] = VectorValue; 2453 } 2454 Builder.restoreIP(OldIP); 2455 return VectorLoopValueMap.initVector(V, Entry); 2456 } 2457 2458 // If this scalar is unknown, assume that it is a constant or that it is 2459 // loop invariant. Broadcast V and save the value for future uses. 2460 Value *B = getBroadcastInstrs(V); 2461 return VectorLoopValueMap.initVector(V, VectorParts(UF, B)); 2462 } 2463 2464 Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part, 2465 unsigned Lane) { 2466 2467 // If the value is not an instruction contained in the loop, it should 2468 // already be scalar. 2469 if (OrigLoop->isLoopInvariant(V)) 2470 return V; 2471 2472 assert(Lane > 0 ? !Legal->isUniformAfterVectorization(cast<Instruction>(V)) 2473 : true && "Uniform values only have lane zero"); 2474 2475 // If the value from the original loop has not been vectorized, it is 2476 // represented by UF x VF scalar values in the new loop. Return the requested 2477 // scalar value. 2478 if (VectorLoopValueMap.hasScalar(V)) 2479 return VectorLoopValueMap.ScalarMapStorage[V][Part][Lane]; 2480 2481 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2482 // for the given unroll part. If this entry is not a vector type (i.e., the 2483 // vectorization factor is one), there is no need to generate an 2484 // extractelement instruction. 2485 auto *U = getVectorValue(V)[Part]; 2486 if (!U->getType()->isVectorTy()) { 2487 assert(VF == 1 && "Value not scalarized has non-vector type"); 2488 return U; 2489 } 2490 2491 // Otherwise, the value from the original loop has been vectorized and is 2492 // represented by UF vector values. Extract and return the requested scalar 2493 // value from the appropriate vector lane. 2494 return Builder.CreateExtractElement(U, Builder.getInt32(Lane)); 2495 } 2496 2497 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2498 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2499 SmallVector<Constant *, 8> ShuffleMask; 2500 for (unsigned i = 0; i < VF; ++i) 2501 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 2502 2503 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2504 ConstantVector::get(ShuffleMask), 2505 "reverse"); 2506 } 2507 2508 // Get a mask to interleave \p NumVec vectors into a wide vector. 2509 // I.e. <0, VF, VF*2, ..., VF*(NumVec-1), 1, VF+1, VF*2+1, ...> 2510 // E.g. For 2 interleaved vectors, if VF is 4, the mask is: 2511 // <0, 4, 1, 5, 2, 6, 3, 7> 2512 static Constant *getInterleavedMask(IRBuilder<> &Builder, unsigned VF, 2513 unsigned NumVec) { 2514 SmallVector<Constant *, 16> Mask; 2515 for (unsigned i = 0; i < VF; i++) 2516 for (unsigned j = 0; j < NumVec; j++) 2517 Mask.push_back(Builder.getInt32(j * VF + i)); 2518 2519 return ConstantVector::get(Mask); 2520 } 2521 2522 // Get the strided mask starting from index \p Start. 2523 // I.e. 
<Start, Start + Stride, ..., Start + Stride*(VF-1)>
static Constant *getStridedMask(IRBuilder<> &Builder, unsigned Start,
                                unsigned Stride, unsigned VF) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Builder.getInt32(Start + i * Stride));

  return ConstantVector::get(Mask);
}

// Get a mask of two parts: The first part consists of sequential integers
// starting from 0, the second part consists of UNDEFs.
// I.e. <0, 1, 2, ..., NumInt - 1, undef, ..., undef>
static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned NumInt,
                                   unsigned NumUndef) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < NumInt; i++)
    Mask.push_back(Builder.getInt32(i));

  Constant *Undef = UndefValue::get(Builder.getInt32Ty());
  for (unsigned i = 0; i < NumUndef; i++)
    Mask.push_back(Undef);

  return ConstantVector::get(Mask);
}

// Concatenate two vectors with the same element type. The 2nd vector should
// not have more elements than the 1st vector. If the 2nd vector has fewer
// elements, extend it with UNDEFs.
static Value *ConcatenateTwoVectors(IRBuilder<> &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = VecTy1->getNumElements();
  unsigned NumElts2 = VecTy2->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Expect the first vector to have at least as many elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    Constant *ExtMask =
        getSequentialMask(Builder, NumElts2, NumElts1 - NumElts2);
    V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask);
  }

  Constant *Mask = getSequentialMask(Builder, NumElts1 + NumElts2, 0);
  return Builder.CreateShuffleVector(V1, V2, Mask);
}

// Concatenate vectors in the given list. All vectors have the same type.
static Value *ConcatenateVectors(IRBuilder<> &Builder,
                                 ArrayRef<Value *> InputList) {
  unsigned NumVec = InputList.size();
  assert(NumVec > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(InputList.begin(), InputList.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVec - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVec - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(ConcatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVec % 2 != 0)
      TmpList.push_back(ResList[NumVec - 1]);

    ResList = TmpList;
    NumVec = ResList.size();
  } while (NumVec > 1);

  return ResList[0];
}

// Try to vectorize the interleave group that \p Instr belongs to.
//
// E.g. Translate the following interleaved load group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     R = Pic[i];             // Member of index 0
//     G = Pic[i+1];           // Member of index 1
//     B = Pic[i+2];           // Member of index 2
//     ...
//     do something to R, G, B
//   }
// To:
//   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
//   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
//   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
//   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
//
// Or translate the following interleaved store group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     ... do something to R, G, B
//     Pic[i]   = R;           // Member of index 0
//     Pic[i+1] = G;           // Member of index 1
//     Pic[i+2] = B;           // Member of index 2
//   }
// To:
//   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
//   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
//   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
//        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
//   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
  const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
  assert(Group && "Failed to get an interleaved access group.");

  // Skip if the current instruction is not the insert position.
  if (Instr != Group->getInsertPos())
    return;

  LoadInst *LI = dyn_cast<LoadInst>(Instr);
  StoreInst *SI = dyn_cast<StoreInst>(Instr);
  Value *Ptr = getPointerOperand(Instr);

  // Prepare the vector type of the interleaved load/store.
  Type *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  unsigned InterleaveFactor = Group->getFactor();
  Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
  Type *PtrTy = VecTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());

  // Prepare the new pointers.
  setDebugLocFromInst(Builder, Ptr);
  SmallVector<Value *, 2> NewPtrs;
  unsigned Index = Group->getIndex(Instr);

  // If the group is reverse, adjust the index to refer to the last vector lane
  // instead of the first. We adjust the index from the first vector lane,
  // rather than directly getting the pointer for lane VF - 1, because the
  // pointer operand of the interleaved access is supposed to be uniform. For
  // uniform instructions, we're only required to generate a value for the
  // first vector lane in each unroll iteration.
  if (Group->isReverse())
    Index += (VF - 1) * Group->getFactor();

  for (unsigned Part = 0; Part < UF; Part++) {
    Value *NewPtr = getScalarValue(Ptr, Part, 0);

    // Note that the current instruction could have any index in the group.
    // We need to adjust the address so that it points at the member of
    // index 0.
    //
    // E.g. a = A[i+1];     // Member of index 1 (current instruction)
    //      b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g. A[i+1] = a;     // Member of index 1
    //      A[i]   = b;     // Member of index 0
    //      A[i+2] = c;     // Member of index 2 (current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
    NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));

    // Cast to the vector pointer type.
    NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
  }

  setDebugLocFromInst(Builder, Instr);
  Value *UndefVec = UndefValue::get(VecTy);

  // Vectorize the interleaved load group.
  if (LI) {

    // For each unroll part, create a wide load for the group.
    SmallVector<Value *, 2> NewLoads;
    for (unsigned Part = 0; Part < UF; Part++) {
      auto *NewLoad = Builder.CreateAlignedLoad(
          NewPtrs[Part], Group->getAlignment(), "wide.vec");
      addMetadata(NewLoad, Instr);
      NewLoads.push_back(NewLoad);
    }

    // For each member in the group, shuffle out the appropriate data from the
    // wide loads.
    for (unsigned I = 0; I < InterleaveFactor; ++I) {
      Instruction *Member = Group->getMember(I);

      // Skip the gaps in the group.
      if (!Member)
        continue;

      VectorParts Entry(UF);
      Constant *StrideMask = getStridedMask(Builder, I, InterleaveFactor, VF);
      for (unsigned Part = 0; Part < UF; Part++) {
        Value *StridedVec = Builder.CreateShuffleVector(
            NewLoads[Part], UndefVec, StrideMask, "strided.vec");

        // If this member has a different type, cast the result type.
        if (Member->getType() != ScalarTy) {
          VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
          StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy);
        }

        Entry[Part] =
            Group->isReverse() ? reverseVector(StridedVec) : StridedVec;
      }
      VectorLoopValueMap.initVector(Member, Entry);
    }
    return;
  }

  // The sub vector type for the current instruction.
  VectorType *SubVT = VectorType::get(ScalarTy, VF);

  // Vectorize the interleaved store group.
  for (unsigned Part = 0; Part < UF; Part++) {
    // Collect the stored vector from each member.
    SmallVector<Value *, 4> StoredVecs;
    for (unsigned i = 0; i < InterleaveFactor; i++) {
      // An interleaved store group doesn't allow a gap, so each index has a
      // member.
      Instruction *Member = Group->getMember(i);
      assert(Member &&
             "Failed to get a member from an interleaved store group");

      Value *StoredVec =
          getVectorValue(cast<StoreInst>(Member)->getValueOperand())[Part];
      if (Group->isReverse())
        StoredVec = reverseVector(StoredVec);

      // If this member has a different type, cast it to a unified type.
      if (StoredVec->getType() != SubVT)
        StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT);

      StoredVecs.push_back(StoredVec);
    }

    // Concatenate all vectors into a wide vector.
    Value *WideVec = ConcatenateVectors(Builder, StoredVecs);

    // Interleave the elements in the wide vector.
    Constant *IMask = getInterleavedMask(Builder, VF, InterleaveFactor);
    Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
                                              "interleaved.vec");

    Instruction *NewStoreInstr =
        Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
    addMetadata(NewStoreInstr, Instr);
  }
}

void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
  // Attempt to issue a wide load.
  LoadInst *LI = dyn_cast<LoadInst>(Instr);
  StoreInst *SI = dyn_cast<StoreInst>(Instr);

  assert((LI || SI) && "Invalid Load/Store instruction");

  // Try to vectorize the interleave group if this access is interleaved.
  if (Legal->isAccessInterleaved(Instr))
    return vectorizeInterleaveGroup(Instr);

  Type *ScalarDataTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  Type *DataTy = VectorType::get(ScalarDataTy, VF);
  Value *Ptr = getPointerOperand(Instr);
  unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment();
  // An alignment of 0 means the target ABI alignment. We need to use the
  // An alignment of 0 means target ABI alignment. We need to use the scalar's
  // target ABI alignment in such a case.
  const DataLayout &DL = Instr->getModule()->getDataLayout();
  if (!Alignment)
    Alignment = DL.getABITypeAlignment(ScalarDataTy);
  unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();

  // Scalarize the memory instruction if necessary.
  if (Legal->memoryInstructionMustBeScalarized(Instr, VF))
    return scalarizeInstruction(Instr, Legal->isScalarWithPredication(Instr));

  // Determine if the pointer operand of the access is either consecutive or
  // reverse consecutive.
  int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
  bool Reverse = ConsecutiveStride < 0;

  // Determine if either a gather or scatter operation is legal.
  bool CreateGatherScatter =
      !ConsecutiveStride && Legal->isLegalGatherOrScatter(Instr);

  VectorParts VectorGep;

  // Handle consecutive loads/stores.
  GetElementPtrInst *Gep = getGEPInstruction(Ptr);
  if (ConsecutiveStride) {
    if (Gep) {
      unsigned NumOperands = Gep->getNumOperands();
#ifndef NDEBUG
      // The original GEP that was identified as a consecutive memory access
      // should have only one loop-variant operand.
      unsigned NumOfLoopVariantOps = 0;
      for (unsigned i = 0; i < NumOperands; ++i)
        if (!PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)),
                                          OrigLoop))
          NumOfLoopVariantOps++;
      assert(NumOfLoopVariantOps == 1 &&
             "Consecutive GEP should have only one loop-variant operand");
#endif
      GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
      Gep2->setName("gep.indvar");

      // A new GEP is created for a 0-lane value of the first unroll iteration.
      // The GEPs for the rest of the unroll iterations are computed below as
      // an offset from this GEP.
      for (unsigned i = 0; i < NumOperands; ++i)
        // We can apply getScalarValue() for all GEP indices. It returns the
        // original value for a loop-invariant operand and the 0-lane value
        // for a consecutive operand.
        Gep2->setOperand(i, getScalarValue(Gep->getOperand(i),
                                           0, /* First unroll iteration */
                                           0 /* 0-lane of the vector */));
      setDebugLocFromInst(Builder, Gep);
      Ptr = Builder.Insert(Gep2);

    } else { // No GEP
      setDebugLocFromInst(Builder, Ptr);
      Ptr = getScalarValue(Ptr, 0, 0);
    }
  } else {
    // At this point we should have a vector version of the GEP for a gather
    // or scatter.
    assert(CreateGatherScatter && "The instruction should be scalarized");
    if (Gep) {
      // Vectorize the GEP across the UF parts. We want a vector value for the
      // base and for each index that is defined inside the loop, even if it
      // is loop-invariant but wasn't hoisted out. Otherwise we want to keep
      // them scalar.
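      // For example (illustrative), for a gather from A[B[i]] with VF = 4 and
      // i32 elements, the scalar "getelementptr A, B[i]" becomes roughly
      //   %VectorGep = getelementptr i32, i32* %A, <4 x i64> %B.vec
      // producing a <4 x i32*> of lane addresses for the masked gather below.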
      SmallVector<VectorParts, 4> OpsV;
      for (Value *Op : Gep->operands()) {
        Instruction *SrcInst = dyn_cast<Instruction>(Op);
        if (SrcInst && OrigLoop->contains(SrcInst))
          OpsV.push_back(getVectorValue(Op));
        else
          OpsV.push_back(VectorParts(UF, Op));
      }
      for (unsigned Part = 0; Part < UF; ++Part) {
        SmallVector<Value *, 4> Ops;
        Value *GEPBasePtr = OpsV[0][Part];
        for (unsigned i = 1; i < Gep->getNumOperands(); i++)
          Ops.push_back(OpsV[i][Part]);
        Value *NewGep = Builder.CreateGEP(GEPBasePtr, Ops, "VectorGep");
        cast<GetElementPtrInst>(NewGep)->setIsInBounds(Gep->isInBounds());
        assert(NewGep->getType()->isVectorTy() && "Expected vector GEP");

        NewGep =
            Builder.CreateBitCast(NewGep, VectorType::get(Ptr->getType(), VF));
        VectorGep.push_back(NewGep);
      }
    } else
      VectorGep = getVectorValue(Ptr);
  }

  VectorParts Mask = createBlockInMask(Instr->getParent());
  // Handle Stores:
  if (SI) {
    assert(!Legal->isUniform(SI->getPointerOperand()) &&
           "We do not allow storing to uniform addresses");
    setDebugLocFromInst(Builder, SI);
    // We don't want to update the value in the map as it might be used in
    // another expression. So don't use a reference type for "StoredVal".
    VectorParts StoredVal = getVectorValue(SI->getValueOperand());

    for (unsigned Part = 0; Part < UF; ++Part) {
      Instruction *NewSI = nullptr;
      if (CreateGatherScatter) {
        Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr;
        NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part],
                                            Alignment, MaskPart);
      } else {
        // Calculate the pointer for the specific unroll part.
        Value *PartPtr =
            Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));

        if (Reverse) {
          // If we store to reverse consecutive memory locations, then we need
          // to reverse the order of elements in the stored value.
          StoredVal[Part] = reverseVector(StoredVal[Part]);
          // If the address is consecutive but reversed, then the
          // wide store needs to start at the last vector element.
          PartPtr =
              Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
          PartPtr =
              Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
          Mask[Part] = reverseVector(Mask[Part]);
        }

        Value *VecPtr =
            Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));

        if (Legal->isMaskRequired(SI))
          NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment,
                                            Mask[Part]);
        else
          NewSI =
              Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment);
      }
      addMetadata(NewSI, SI);
    }
    return;
  }

  // Handle loads.
  assert(LI && "Must have a load instruction");
  setDebugLocFromInst(Builder, LI);
  VectorParts Entry(UF);
  for (unsigned Part = 0; Part < UF; ++Part) {
    Instruction *NewLI;
    if (CreateGatherScatter) {
      Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr;
      NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart,
                                         0, "wide.masked.gather");
      Entry[Part] = NewLI;
    } else {
      // Calculate the pointer for the specific unroll part.
      Value *PartPtr =
          Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));

      if (Reverse) {
        // If the address is consecutive but reversed, then the
        // wide load needs to start at the last vector element.
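        // For example (illustrative), with VF = 4 and Part = 0 the two GEPs
        // below compute Ptr - 3, so the wide load covers the four elements
        // ending at Ptr; the loaded vector is then reversed to restore the
        // original element order.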
        PartPtr =
            Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
        PartPtr =
            Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
        Mask[Part] = reverseVector(Mask[Part]);
      }

      Value *VecPtr =
          Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
      if (Legal->isMaskRequired(LI))
        NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
                                         UndefValue::get(DataTy),
                                         "wide.masked.load");
      else
        NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
      Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI;
    }
    addMetadata(NewLI, LI);
  }
  VectorLoopValueMap.initVector(Instr, Entry);
}

void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
                                               bool IfPredicateInstr) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  DEBUG(dbgs() << "LV: Scalarizing"
               << (IfPredicateInstr ? " and predicating:" : ":") << *Instr
               << '\n');
  // Holds vector parameters or scalars, in case of uniform values.
  SmallVector<VectorParts, 4> Params;

  setDebugLocFromInst(Builder, Instr);

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  // Initialize a new scalar map entry.
  ScalarParts Entry(UF);

  VectorParts Cond;
  if (IfPredicateInstr)
    Cond = createBlockInMask(Instr->getParent());

  // Determine the number of scalars we need to generate for each unroll
  // iteration. If the instruction is uniform, we only need to generate the
  // first lane. Otherwise, we generate all VF values.
  unsigned Lanes = Legal->isUniformAfterVectorization(Instr) ? 1 : VF;

  // For each vector unroll 'part':
  for (unsigned Part = 0; Part < UF; ++Part) {
    Entry[Part].resize(VF);
    // For each scalar that we create:
    for (unsigned Lane = 0; Lane < Lanes; ++Lane) {

      // Start if-block.
      Value *Cmp = nullptr;
      if (IfPredicateInstr) {
        Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Lane));
        Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp,
                                 ConstantInt::get(Cmp->getType(), 1));
      }

      Instruction *Cloned = Instr->clone();
      if (!IsVoidRetTy)
        Cloned->setName(Instr->getName() + ".cloned");

      // Replace the operands of the cloned instruction with their scalar
      // equivalents in the new loop.
      for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
        auto *NewOp = getScalarValue(Instr->getOperand(op), Part, Lane);
        Cloned->setOperand(op, NewOp);
      }
      addNewMetadata(Cloned, Instr);

      // Place the cloned scalar in the new loop.
      Builder.Insert(Cloned);

      // Add the cloned scalar to the scalar map entry.
      Entry[Part][Lane] = Cloned;

      // If we just cloned a new assumption, add it to the assumption cache.
      if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
        if (II->getIntrinsicID() == Intrinsic::assume)
          AC->registerAssumption(II);

      // End if-block.
      if (IfPredicateInstr)
        PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp));
    }
  }
  VectorLoopValueMap.initScalar(Instr, Entry);
}

PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
                                                      Value *End, Value *Step,
                                                      Instruction *DL) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible no latch exists yet. If
  // so, use the header as this will be a single-block loop.
  if (!Latch)
    Latch = Header;

  IRBuilder<> Builder(&*Header->getFirstInsertionPt());
  Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
  setDebugLocFromInst(Builder, OldInst);
  auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");

  Builder.SetInsertPoint(Latch->getTerminator());
  setDebugLocFromInst(Builder, OldInst);

  // Create i+1 and fill the PHINode.
  Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
  Induction->addIncoming(Start, L->getLoopPreheader());
  Induction->addIncoming(Next, Latch);
  // Create the compare.
  Value *ICmp = Builder.CreateICmpEQ(Next, End);
  Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);

  // Now we have two terminators. Remove the old one from the block.
  Latch->getTerminator()->eraseFromParent();

  return Induction;
}

Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
  if (TripCount)
    return TripCount;

  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  // Find the loop boundaries.
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
         "Invalid loop count");

  Type *IdxTy = Legal->getWidestInductionType();

  // The exit count might have the type of i64 while the phi is i32. This can
  // happen if we have an induction variable that is sign extended before the
  // compare. The only way we get a backedge-taken count in that case is if
  // the induction variable was signed and as such will not overflow. In such
  // a case truncation is legal.
  if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
      IdxTy->getPrimitiveSizeInBits())
    BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
  BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);

  // Get the total trip count from the backedge-taken count by adding 1.
  const SCEV *ExitCount = SE->getAddExpr(
      BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));

  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();

  // Expand the trip count and place the new instructions in the preheader.
  SCEVExpander Exp(*SE, DL, "induction");

  // Count holds the overall loop count (N).
  TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
                                L->getLoopPreheader()->getTerminator());

  if (TripCount->getType()->isPointerTy())
    TripCount =
        CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
                                    L->getLoopPreheader()->getTerminator());

  return TripCount;
}

Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
  if (VectorTripCount)
    return VectorTripCount;

  Value *TC = getOrCreateTripCount(L);
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());

  // Now we need to generate the expression for the part of the loop that the
  // vectorized body will execute. This is equal to N - (N % Step) if scalar
  // iterations are not required for correctness, or N - Step otherwise. Step
  // is equal to the vectorization factor (number of SIMD elements) times the
  // unroll factor (number of SIMD instructions).
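  // For example (illustrative), with N = 21, VF = 4 and UF = 2, Step is 8, so
  // the vector loop covers 21 - (21 % 8) = 16 iterations and the remaining 5
  // iterations run in the scalar epilogue.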
  Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
  Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");

  // If there is a non-reversed interleaved group that may speculatively
  // access memory out-of-bounds, we need to ensure that there will be at
  // least one iteration of the scalar epilogue loop. Thus, if the step evenly
  // divides the trip count, we set the remainder to be equal to the step. If
  // the step does not evenly divide the trip count, no adjustment is
  // necessary since there will already be scalar iterations. Note that the
  // minimum iterations check ensures that N >= Step.
  if (VF > 1 && Legal->requiresScalarEpilogue()) {
    auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
    R = Builder.CreateSelect(IsZero, Step, R);
  }

  VectorTripCount = Builder.CreateSub(TC, R, "n.vec");

  return VectorTripCount;
}

void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
                                                         BasicBlock *Bypass) {
  Value *Count = getOrCreateTripCount(L);
  BasicBlock *BB = L->getLoopPreheader();
  IRBuilder<> Builder(BB->getTerminator());

  // Generate code to check that the loop's trip count, which we computed by
  // adding one to the backedge-taken count, will not overflow.
  Value *CheckMinIters = Builder.CreateICmpULT(
      Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");

  BasicBlock *NewBB =
      BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked");
  // Update the dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, CheckMinIters));
  LoopBypassBlocks.push_back(BB);
}

void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L,
                                                     BasicBlock *Bypass) {
  Value *TC = getOrCreateVectorTripCount(L);
  BasicBlock *BB = L->getLoopPreheader();
  IRBuilder<> Builder(BB->getTerminator());

  // Now, compare the new count to zero. If it is zero, skip the vector loop
  // and jump to the scalar loop.
  Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
                                    "cmp.zero");

  BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update the dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, Cmp));
  LoopBypassBlocks.push_back(BB);
}

void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *BB = L->getLoopPreheader();

  // Generate the code to check the SCEV assumptions that we made.
  // We want the new basic block to start at the first instruction in a
  // sequence of instructions that form a check.
  SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
                   "scev.check");
  Value *SCEVCheck =
      Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());

  if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
    if (C->isZero())
      return;

  // Create a new block containing the stride check.
  BB->setName("vector.scevcheck");
  auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update the dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, SCEVCheck));
  LoopBypassBlocks.push_back(BB);
  AddedSafetyChecks = true;
}

void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *BB = L->getLoopPreheader();

  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
  Instruction *FirstCheckInst;
  Instruction *MemRuntimeCheck;
  std::tie(FirstCheckInst, MemRuntimeCheck) =
      Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
  if (!MemRuntimeCheck)
    return;

  // Create a new block containing the memory check.
  BB->setName("vector.memcheck");
  auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update the dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
  LoopBypassBlocks.push_back(BB);
  AddedSafetyChecks = true;

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
  LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
                                           PSE.getSE());
  LVer->prepareNoAliasMetadata();
}

void InnerLoopVectorizer::createEmptyLoop() {
  /*
   In this function we generate a new loop. The new loop will contain
   the vectorized instructions while the old loop will continue to run the
   scalar remainder.

       [ ] <-- loop iteration number check.
    /   |
   /    v
  |    [ ] <-- vector loop bypass (may consist of multiple blocks).
  |  /  |
  | /   v
  ||   [ ]     <-- vector pre header.
  |/    |
  |     v
  |    [  ] \
  |    [  ]_|   <-- vector loop.
  |     |
  |     v
  |   -[ ]   <--- middle-block.
  |  /  |
  | /   v
  -|- >[ ]     <--- new preheader.
   |    |
   |    v
   |   [ ] \
   |   [ ]_|   <-- old scalar loop to handle remainder.
    \   |
     \  v
      >[ ]     <-- exit block.
   ...
   */

  BasicBlock *OldBasicBlock = OrigLoop->getHeader();
  BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
  BasicBlock *ExitBlock = OrigLoop->getExitBlock();
  assert(VectorPH && "Invalid loop structure");
  assert(ExitBlock && "Must have an exit block");

  // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators that often have multiple pointer
  // induction variables. In the code below we also support a case where we
  // don't have a single induction variable.
  //
  // We try hard to obtain an induction variable from the original loop.
  // However, if we can't find one that:
  //   - is an integer
  //   - counts from zero, stepping by one
  //   - is the size of the widest induction variable type
  // then we create a new one.
  OldInduction = Legal->getInduction();
  Type *IdxTy = Legal->getWidestInductionType();

  // Split the single-block loop into the two-loop structure described above.
  BasicBlock *VecBody =
      VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
  BasicBlock *MiddleBlock =
      VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
  BasicBlock *ScalarPH =
      MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");

  // Create and register the new vector loop.
  Loop *Lp = new Loop();
  Loop *ParentLoop = OrigLoop->getParentLoop();

  // Insert the new loop into the loop nest and register the new basic blocks
  // before calling any utilities such as SCEV that require valid LoopInfo.
  if (ParentLoop) {
    ParentLoop->addChildLoop(Lp);
    ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
    ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
  } else {
    LI->addTopLevelLoop(Lp);
  }
  Lp->addBasicBlockToLoop(VecBody, *LI);

  // Find the loop boundaries.
  Value *Count = getOrCreateTripCount(Lp);

  Value *StartIdx = ConstantInt::get(IdxTy, 0);

  // We need to test whether the backedge-taken count is uint##_max. Adding
  // one to it will cause overflow and an incorrect loop trip count in the
  // vector body. In case of overflow we want to directly jump to the scalar
  // remainder loop.
  emitMinimumIterationCountCheck(Lp, ScalarPH);
  // Now, compare the new count to zero. If it is zero, skip the vector loop
  // and jump to the scalar loop.
  emitVectorLoopEnteredCheck(Lp, ScalarPH);
  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  emitSCEVChecks(Lp, ScalarPH);

  // Generate the code that checks at runtime whether arrays overlap. We put
  // the checks into a separate block to make the more common case of few
  // elements faster.
  emitMemRuntimeChecks(Lp, ScalarPH);

  // Generate the induction variable.
  // The loop step is equal to the vectorization factor (num of SIMD elements)
  // times the unroll factor (num of SIMD instructions).
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Constant *Step = ConstantInt::get(IdxTy, VF * UF);
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // We are going to resume the execution of the scalar loop.
  // Go over all of the induction variables that we found and fix the
  // PHIs that are left in the scalar version of the loop.
  // The starting values of the PHI nodes depend on the counter of the last
  // iteration in the vectorized loop.
  // If we come from a bypass edge then we need to start from the original
  // start value.

  // This variable saves the new starting index for the scalar loop. It is
  // used to test if there are any tail iterations left once the vector loop
  // has completed.
  LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
  for (auto &InductionEntry : *List) {
    PHINode *OrigPhi = InductionEntry.first;
    InductionDescriptor II = InductionEntry.second;

    // Create phi nodes to merge from the backedge-taken check block.
    PHINode *BCResumeVal = PHINode::Create(
        OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
    Value *EndValue;
    if (OrigPhi == OldInduction) {
      // We know what the end value is.
      EndValue = CountRoundDown;
    } else {
      IRBuilder<> B(LoopBypassBlocks.back()->getTerminator());
      Type *StepType = II.getStep()->getType();
      Instruction::CastOps CastOp =
          CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
      Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      EndValue = II.transform(B, CRD, PSE.getSE(), DL);
      EndValue->setName("ind.end");
    }

    // The new PHI merges the original incoming value, in case of a bypass,
    // or the value at the end of the vectorized loop.
    BCResumeVal->addIncoming(EndValue, MiddleBlock);

    // Fix up external users of the induction variable.
    fixupIVUsers(OrigPhi, II, CountRoundDown, EndValue, MiddleBlock);

    // Fix the scalar body counter (PHI node).
    unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);

    // The old induction's phi node in the scalar body needs the truncated
    // value.
    for (BasicBlock *BB : LoopBypassBlocks)
      BCResumeVal->addIncoming(II.getStartValue(), BB);
    OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
  }

  // Add a check in the middle block to see if we have completed
  // all of the iterations in the first vector loop.
  // If (N - N%VF) == N, then we *don't* need to run the remainder.
  Value *CmpN =
      CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
                      CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
  ReplaceInstWithInst(MiddleBlock->getTerminator(),
                      BranchInst::Create(ExitBlock, ScalarPH, CmpN));

  // Get ready to start creating new instructions into the vectorized body.
  Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());

  // Save the state.
  LoopVectorPreHeader = Lp->getLoopPreheader();
  LoopScalarPreHeader = ScalarPH;
  LoopMiddleBlock = MiddleBlock;
  LoopExitBlock = ExitBlock;
  LoopVectorBody = VecBody;
  LoopScalarBody = OldBasicBlock;

  // Keep all loop hints from the original loop on the vector loop (we'll
  // replace the vectorizer-specific hints below).
  if (MDNode *LID = OrigLoop->getLoopID())
    Lp->setLoopID(LID);

  LoopVectorizeHints Hints(Lp, true, *ORE);
  Hints.setAlreadyVectorized();
}

// Fix up external users of the induction variable. At this point, we are in
// LCSSA form, with all external PHIs that use the IV having one input value,
// coming from the remainder loop. We need those PHIs to also have a correct
// value for the IV when arriving directly from the middle block.
void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
                                       const InductionDescriptor &II,
                                       Value *CountRoundDown, Value *EndValue,
                                       BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the
  // penultimate value (the value that feeds into the phi from the loop
  // latch). We allow both, but they, obviously, have different values.

  assert(OrigLoop->getExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());
      Value *CountMinusOne = B.CreateSub(
          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
      Value *CMO = B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType(),
                                       "cast.cmo");
      Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2".
    // So, verify that we don't already have an incoming value for the middle
    // block.
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
      PHI->addIncoming(I.second, MiddleBlock);
  }
}

namespace {
struct CSEDenseMapInfo {
  static bool canHandle(Instruction *I) {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  }
  static inline Instruction *getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }
  static inline Instruction *getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }
  static unsigned getHashValue(Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");
    return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
                                                           I->value_op_end()));
  }
  static bool isEqual(Instruction *LHS, Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(RHS);
  }
};
}

/// \brief Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple CSE.
  SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *In = &*I++;

    if (!CSEDenseMapInfo::canHandle(In))
      continue;

    // Check if we can replace this instruction with any of the
    // visited instructions.
    if (Instruction *V = CSEMap.lookup(In)) {
      In->replaceAllUsesWith(V);
      In->eraseFromParent();
      continue;
    }

    CSEMap[In] = In;
  }
}

/// \brief Adds a 'fast' flag to floating point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V)) {
    FastMathFlags Flags;
    Flags.setUnsafeAlgebra();
    cast<Instruction>(V)->setFastMathFlags(Flags);
  }
  return V;
}

/// \brief Estimate the overhead of scalarizing a value based on its type.
/// Insert and Extract are set if the result needs to be inserted and/or
/// extracted from vectors.
static unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract,
                                         const TargetTransformInfo &TTI) {
  if (Ty->isVoidTy())
    return 0;

  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (unsigned I = 0, E = Ty->getVectorNumElements(); I < E; ++I) {
    if (Extract)
      Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, I);
    if (Insert)
      Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, I);
  }

  return Cost;
}

/// \brief Estimate the overhead of scalarizing an Instruction based on the
/// types of its operands and return value.
static unsigned getScalarizationOverhead(SmallVectorImpl<Type *> &OpTys,
                                         Type *RetTy,
                                         const TargetTransformInfo &TTI) {
  unsigned ScalarizationCost =
      getScalarizationOverhead(RetTy, true, false, TTI);

  for (Type *Ty : OpTys)
    ScalarizationCost += getScalarizationOverhead(Ty, false, true, TTI);

  return ScalarizationCost;
}
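// For example (illustrative), scalarizing an instruction that produces a
// <4 x i32> result from two <4 x i32> operands costs four insertelements for
// rebuilding the result plus eight extractelements for unpacking the
// operands, each priced by TTI.getVectorInstrCost above.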
/// \brief Estimate the overhead of scalarizing an instruction. This is a
/// convenience wrapper for the type-based getScalarizationOverhead API.
static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
                                         const TargetTransformInfo &TTI) {
  if (VF == 1)
    return 0;

  Type *RetTy = ToVectorTy(I->getType(), VF);

  SmallVector<Type *, 4> OpTys;
  unsigned OperandsNum = I->getNumOperands();
  for (unsigned OpInd = 0; OpInd < OperandsNum; ++OpInd)
    OpTys.push_back(ToVectorTy(I->getOperand(OpInd)->getType(), VF));

  return getScalarizationOverhead(OpTys, RetTy, TTI);
}

// Estimate the cost of a call instruction CI if it were vectorized with
// factor VF. Return the cost of the instruction, including scalarization
// overhead if it's needed. The flag NeedToScalarize shows if the call needs
// to be scalarized - i.e. either a vector version isn't available, or it is
// too expensive.
static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
                                  const TargetTransformInfo &TTI,
                                  const TargetLibraryInfo *TLI,
                                  bool &NeedToScalarize) {
  Function *F = CI->getCalledFunction();
  StringRef FnName = CI->getCalledFunction()->getName();
  Type *ScalarRetTy = CI->getType();
  SmallVector<Type *, 4> Tys, ScalarTys;
  for (auto &ArgOp : CI->arg_operands())
    ScalarTys.push_back(ArgOp->getType());

  // Estimate the cost of the scalarized vector call. The source operands are
  // assumed to be vectors, so we need to extract individual elements from
  // there, execute VF scalar calls, and then gather the result into the
  // vector return value.
  unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
  if (VF == 1)
    return ScalarCallCost;

  // Compute the corresponding vector type for the return value and arguments.
  Type *RetTy = ToVectorTy(ScalarRetTy, VF);
  for (Type *ScalarTy : ScalarTys)
    Tys.push_back(ToVectorTy(ScalarTy, VF));

  // Compute the costs of unpacking the argument values for the scalar calls
  // and packing the return values into a vector.
  unsigned ScalarizationCost = getScalarizationOverhead(Tys, RetTy, TTI);

  unsigned Cost = ScalarCallCost * VF + ScalarizationCost;

  // If we can't emit a vector call for this function, then the currently
  // found cost is the cost we need to return.
  NeedToScalarize = true;
  if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
    return Cost;

  // If the corresponding vector cost is cheaper, return its cost.
  unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
  if (VectorCallCost < Cost) {
    NeedToScalarize = false;
    return VectorCallCost;
  }
  return Cost;
}

// Estimate the cost of an intrinsic call instruction CI if it were vectorized
// with factor VF. Return the cost of the instruction, including scalarization
// overhead if it's needed.
static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF,
                                       const TargetTransformInfo &TTI,
                                       const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  assert(ID && "Expected intrinsic call!");

  Type *RetTy = ToVectorTy(CI->getType(), VF);
  SmallVector<Type *, 4> Tys;
  for (Value *ArgOperand : CI->arg_operands())
    Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));

  FastMathFlags FMF;
  if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
    FMF = FPMO->getFastMathFlags();

  return TTI.getIntrinsicInstrCost(ID, RetTy, Tys, FMF);
}

static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(T1->getVectorElementType());
  auto *I2 = cast<IntegerType>(T2->getVectorElementType());
  return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
}
static Type *largestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(T1->getVectorElementType());
  auto *I2 = cast<IntegerType>(T2->getVectorElementType());
  return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
}

void InnerLoopVectorizer::truncateToMinimalBitwidths() {
  // For every instruction `I` in MinBWs, truncate the operands, create a
  // truncated version of `I` and reextend its result. InstCombine runs
  // later and will remove any ext/trunc pairs.
  //
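  // For example (illustrative), if the cost model determined that an i32 add
  // only needs 8 bits, with VF = 4 the widened add is rewritten roughly as:
  //   %a.tr = trunc <4 x i32> %a to <4 x i8>
  //   %b.tr = trunc <4 x i32> %b to <4 x i8>
  //   %add  = add <4 x i8> %a.tr, %b.tr
  //   %res  = zext <4 x i8> %add to <4 x i32>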
  SmallPtrSet<Value *, 4> Erased;
  for (const auto &KV : Cost->getMinimalBitwidths()) {
    VectorParts &Parts = VectorLoopValueMap.getVector(KV.first);
    for (Value *&I : Parts) {
      if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
        continue;
      Type *OriginalTy = I->getType();
      Type *ScalarTruncatedTy =
          IntegerType::get(OriginalTy->getContext(), KV.second);
      Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
                                          OriginalTy->getVectorNumElements());
      if (TruncatedTy == OriginalTy)
        continue;

      IRBuilder<> B(cast<Instruction>(I));
      auto ShrinkOperand = [&](Value *V) -> Value * {
        if (auto *ZI = dyn_cast<ZExtInst>(V))
          if (ZI->getSrcTy() == TruncatedTy)
            return ZI->getOperand(0);
        return B.CreateZExtOrTrunc(V, TruncatedTy);
      };

      // The actual instruction modification depends on the instruction type,
      // unfortunately.
      Value *NewI = nullptr;
      if (auto *BO = dyn_cast<BinaryOperator>(I)) {
        NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
                             ShrinkOperand(BO->getOperand(1)));
        cast<BinaryOperator>(NewI)->copyIRFlags(I);
      } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
        NewI =
            B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
                         ShrinkOperand(CI->getOperand(1)));
      } else if (auto *SI = dyn_cast<SelectInst>(I)) {
        NewI = B.CreateSelect(SI->getCondition(),
                              ShrinkOperand(SI->getTrueValue()),
                              ShrinkOperand(SI->getFalseValue()));
      } else if (auto *CI = dyn_cast<CastInst>(I)) {
        switch (CI->getOpcode()) {
        default:
          llvm_unreachable("Unhandled cast!");
        case Instruction::Trunc:
          NewI = ShrinkOperand(CI->getOperand(0));
          break;
        case Instruction::SExt:
          NewI = B.CreateSExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        case Instruction::ZExt:
          NewI = B.CreateZExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        }
      } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
        auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
        auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
        auto *O1 = B.CreateZExtOrTrunc(
            SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));

        NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
      } else if (isa<LoadInst>(I)) {
        // Don't do anything with the operands, just extend the result.
        continue;
      } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
        auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
        NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
      } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
        auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        NewI = B.CreateExtractElement(O0, EE->getOperand(2));
      } else {
        llvm_unreachable("Unhandled instruction type!");
      }

      // Lastly, extend the result.
      NewI->takeName(cast<Instruction>(I));
      Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
      I->replaceAllUsesWith(Res);
      cast<Instruction>(I)->eraseFromParent();
      Erased.insert(I);
      I = Res;
    }
  }

  // We'll have created a bunch of ZExts that are now dead. Clean them up.
  for (const auto &KV : Cost->getMinimalBitwidths()) {
    VectorParts &Parts = VectorLoopValueMap.getVector(KV.first);
    for (Value *&I : Parts) {
      ZExtInst *Inst = dyn_cast<ZExtInst>(I);
      if (Inst && Inst->use_empty()) {
        Value *NewI = Inst->getOperand(0);
        Inst->eraseFromParent();
        I = NewI;
      }
    }
  }
}

void InnerLoopVectorizer::vectorizeLoop() {
  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost model.
  //
  //===------------------------------------------------===//
  Constant *Zero = Builder.getInt32(0);

  // In order to support recurrences we need to be able to vectorize Phi
  // nodes. Phi nodes have cycles, so we need to vectorize them in two stages.
  // First, we create a new vector PHI node with no incoming edges. We use
  // this value when we vectorize all of the instructions that use the PHI.
  // Next, after all of the instructions in the block are complete we add the
  // new incoming edges to the PHI. At this point all of the instructions in
  // the basic block are vectorized, so we can use them to construct the PHI.
  PhiVector PHIsToFix;

  // Collect instructions from the original loop that will become trivially
  // dead in the vectorized loop. We don't need to vectorize these
  // instructions.
  collectTriviallyDeadInstructions();

  // Scan the loop in a topological order to ensure that defs are vectorized
  // before users.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  // Vectorize all of the blocks in the original loop.
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    vectorizeBlockInLoop(BB, &PHIsToFix);

  // Insert truncates and extends for any truncated instructions as hints to
  // InstCombine.
  if (VF > 1)
    truncateToMinimalBitwidths();

  // At this point every instruction in the original loop is widened to a
  // vector form. Now we need to fix the recurrences in PHIsToFix. These PHI
  // nodes are currently empty because we did not want to introduce cycles.
  // This is the second stage of vectorizing recurrences.
  for (PHINode *Phi : PHIsToFix) {
    assert(Phi && "Unable to recover vectorized PHI");

    // Handle first-order recurrences that need to be fixed.
    if (Legal->isFirstOrderRecurrence(Phi)) {
      fixFirstOrderRecurrence(Phi);
      continue;
    }

    // If the phi node is not a first-order recurrence, it must be a
    // reduction. Get its reduction variable descriptor.
    assert(Legal->isReductionVariable(Phi) &&
           "Unable to find the reduction variable");
    RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];

    RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
    TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
    Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
    RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
        RdxDesc.getMinMaxRecurrenceKind();
    setDebugLocFromInst(Builder, ReductionStartValue);

    // We need to generate a reduction vector from the incoming scalar.
    // To do so, we need to generate the 'identity' vector and override
    // one of the elements with the incoming scalar reduction. We need
    // to do it in the vector-loop preheader.
    Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());

    // This is the vector-clone of the value that leaves the loop.
    const VectorParts &VectorExit = getVectorValue(LoopExitInst);
    Type *VecTy = VectorExit[0]->getType();

    // Find the reduction identity variable. Zero for addition, or, xor,
    // one for multiplication, -1 for And.
    Value *Identity;
    Value *VectorStart;
    if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
        RK == RecurrenceDescriptor::RK_FloatMinMax) {
      // MinMax reductions have the start value as their identity.
      if (VF == 1) {
        VectorStart = Identity = ReductionStartValue;
      } else {
        VectorStart = Identity =
            Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
      }
    } else {
      // Handle other reduction kinds:
      Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
          RK, VecTy->getScalarType());
      if (VF == 1) {
        Identity = Iden;
        // This vector is the Identity vector where the first element is the
        // incoming scalar reduction.
        VectorStart = ReductionStartValue;
      } else {
        Identity = ConstantVector::getSplat(VF, Iden);

        // This vector is the Identity vector where the first element is the
        // incoming scalar reduction.
        VectorStart =
            Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
      }
    }

    // Fix the vector-loop phi.

    // Reductions do not have to start at zero. They can start with
    // any loop-invariant values.
    const VectorParts &VecRdxPhi = getVectorValue(Phi);
    BasicBlock *Latch = OrigLoop->getLoopLatch();
    Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
    const VectorParts &Val = getVectorValue(LoopVal);
    for (unsigned part = 0; part < UF; ++part) {
      // Make sure to add the reduction start value only to the
      // first unroll part.
      Value *StartVal = (part == 0) ? VectorStart : Identity;
      cast<PHINode>(VecRdxPhi[part])
          ->addIncoming(StartVal, LoopVectorPreHeader);
      cast<PHINode>(VecRdxPhi[part])
          ->addIncoming(Val[part], LoopVectorBody);
    }

    // Before each round, move the insertion point right between
    // the PHIs and the values we are going to write.
    // This allows us to write both PHINodes and the extractelement
    // instructions.
    Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

    VectorParts &RdxParts = VectorLoopValueMap.getVector(LoopExitInst);
    setDebugLocFromInst(Builder, LoopExitInst);

    // If the vector reduction can be performed in a smaller type, we truncate
    // then extend the loop exit value to enable InstCombine to evaluate the
    // entire expression in the smaller type.
    if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
      Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
      Builder.SetInsertPoint(LoopVectorBody->getTerminator());
      for (unsigned part = 0; part < UF; ++part) {
        Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
        Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
                                          : Builder.CreateZExt(Trunc, VecTy);
        for (Value::user_iterator UI = RdxParts[part]->user_begin();
             UI != RdxParts[part]->user_end();)
          if (*UI != Trunc) {
            (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd);
            RdxParts[part] = Extnd;
          } else {
            ++UI;
          }
      }
      Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
      for (unsigned part = 0; part < UF; ++part)
        RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
    }

    // Reduce all of the unrolled parts into a single vector.
    Value *ReducedPartRdx = RdxParts[0];
    unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
    setDebugLocFromInst(Builder, ReducedPartRdx);
    for (unsigned part = 1; part < UF; ++part) {
      if (Op != Instruction::ICmp && Op != Instruction::FCmp)
        // Floating-point operations had to be 'fast' to enable the reduction.
        ReducedPartRdx = addFastMathFlag(
            Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part],
                                ReducedPartRdx, "bin.rdx"));
      else
        ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp(
            Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]);
    }

    if (VF > 1) {
      // VF is a power of 2 so we can emit the reduction using log2(VF)
      // shuffles and vector ops, reducing the set of values being computed
      // by half each round.
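      // For example (illustrative), an add reduction with VF = 4 becomes:
      //   %rdx.shuf  = shufflevector %rdx, undef, <2, 3, undef, undef>
      //   %bin.rdx   = add %rdx, %rdx.shuf
      //   %rdx.shuf2 = shufflevector %bin.rdx, undef, <1, undef, undef, undef>
      //   %bin.rdx2  = add %bin.rdx, %rdx.shuf2
      // after which the reduced value sits in lane 0.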
      assert(isPowerOf2_32(VF) &&
             "Reduction emission only supported for pow2 vectors!");
      Value *TmpVec = ReducedPartRdx;
      SmallVector<Constant *, 32> ShuffleMask(VF, nullptr);
      for (unsigned i = VF; i != 1; i >>= 1) {
        // Move the upper half of the vector to the lower half.
        for (unsigned j = 0; j != i / 2; ++j)
          ShuffleMask[j] = Builder.getInt32(i / 2 + j);

        // Fill the rest of the mask with undef.
        std::fill(&ShuffleMask[i / 2], ShuffleMask.end(),
                  UndefValue::get(Builder.getInt32Ty()));

        Value *Shuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()),
            ConstantVector::get(ShuffleMask), "rdx.shuf");

        if (Op != Instruction::ICmp && Op != Instruction::FCmp)
          // Floating-point operations had to be 'fast' to enable the
          // reduction.
          TmpVec = addFastMathFlag(Builder.CreateBinOp(
              (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx"));
        else
          TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind,
                                                        TmpVec, Shuf);
      }

      // The result is in the first element of the vector.
      ReducedPartRdx =
          Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));

      // If the reduction can be performed in a smaller type, we need to
      // extend the reduction to the wider type before we branch to the
      // original loop.
      if (Phi->getType() != RdxDesc.getRecurrenceType())
        ReducedPartRdx =
            RdxDesc.isSigned()
                ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
                : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
    }

    // Create a phi node that merges control-flow from the backedge-taken
    // check block and the middle block.
    PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
                                          LoopScalarPreHeader->getTerminator());
    for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
      BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
    BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);

    // Now, we need to fix the users of the reduction variable
    // inside and outside of the scalar remainder loop.
    // We know that the loop is in LCSSA form. We need to update the
    // PHI nodes in the exit blocks.
    for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
                              LEE = LoopExitBlock->end();
         LEI != LEE; ++LEI) {
      PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
      if (!LCSSAPhi)
        break;

      // All PHINodes need to have a single entry edge, or two if
      // we already fixed them.
      assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI");

      // We found our reduction value exit-PHI. Update it with the
      // incoming bypass edge.
      if (LCSSAPhi->getIncomingValue(0) == LoopExitInst) {
        // Add an edge coming from the bypass.
        LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
        break;
      }
    } // end of the LCSSA phi scan.

    // Fix the scalar loop reduction variable with the incoming reduction sum
    // from the vector body and from the backedge value.
    int IncomingEdgeBlockIdx =
        Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
    assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
    // Pick the other block.
    int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
    Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
    Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
  } // end of for each Phi in PHIsToFix.

  fixLCSSAPHIs();

  // Make sure DomTree is updated.
  updateAnalysis();

  predicateInstructions();

  // Remove redundant induction instructions.
  cse(LoopVectorBody);
}

void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {

  // This is the second phase of vectorizing first-order recurrences. An
  // overview of the transformation is described below. Suppose we have the
  // following loop.
  //
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] - a[i - 1];
  //
  // There is a first-order recurrence on "a". For this loop, the shorthand
  // scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // temporary value for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
  //
  //   vector.ph:
  //     v_init = vector(..., ..., ..., a[-1])
  //     br vector.body
  //
  //   vector.body
  //     i = phi [0, vector.ph], [i+4, vector.body]
  //     v1 = phi [v_init, vector.ph], [v2, vector.body]
  //     v2 = a[i, i+1, i+2, i+3];
  //     v3 = vector(v1(3), v2(0, 1, 2))
  //     b[i, i+1, i+2, i+3] = v2 - v3
  //     br cond, vector.body, middle.block
  //
  //   middle.block:
  //     x = v2(3)
  //     br scalar.ph
  //
  //   scalar.ph:
  //     s_init = phi [x, middle.block], [a[-1], otherwise]
  //     br scalar.body
  //
  // After the vector loop completes execution, we extract the next value of
  // the recurrence (x) to use as the initial value in the scalar loop.

  // Get the original loop preheader and single loop latch.
  auto *Preheader = OrigLoop->getLoopPreheader();
  auto *Latch = OrigLoop->getLoopLatch();

  // Get the initial and previous values of the scalar recurrence.
  auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
  auto *Previous = Phi->getIncomingValueForBlock(Latch);

  // Create a vector from the initial value.
  auto *VectorInit = ScalarInit;
  if (VF > 1) {
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
    VectorInit = Builder.CreateInsertElement(
        UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
        Builder.getInt32(VF - 1), "vector.recur.init");
  }

  // We constructed a temporary phi node in the first phase of vectorization.
  // This phi node will eventually be deleted.
  VectorParts &PhiParts = VectorLoopValueMap.getVector(Phi);
  Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));

  // Create a phi node for the new recurrence. The current value will either
  // be the initial value inserted into a vector or a loop-varying vector
  // value.
  auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
  VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);

  // Get the vectorized previous value. We ensured the previous value was an
  // instruction when detecting the recurrence.
  auto &PreviousParts = getVectorValue(Previous);

  // Set the insertion point to be after this instruction. We ensured the
  // previous value dominated all uses of the phi when detecting the
  // recurrence.
  Builder.SetInsertPoint(
      &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1])));

  // We will construct a vector for the recurrence by combining the values for
  // the current and previous iterations. This is the required shuffle mask.
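  // For example (illustrative), for VF = 4 the mask is <3, 4, 5, 6>: lane 3
  // of the incoming vector (the value carried over from the previous
  // iteration) followed by lanes 0 through 2 of the current vector, matching
  // v3 in the example above.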
  for (auto &I : *LoopExitBlock) {
    auto *LCSSAPhi = dyn_cast<PHINode>(&I);
    if (!LCSSAPhi)
      break;
    if (LCSSAPhi->getIncomingValue(0) == Phi) {
      LCSSAPhi->addIncoming(Extract, LoopMiddleBlock);
      break;
    }
  }
}

void InnerLoopVectorizer::fixLCSSAPHIs() {
  for (Instruction &LEI : *LoopExitBlock) {
    auto *LCSSAPhi = dyn_cast<PHINode>(&LEI);
    if (!LCSSAPhi)
      break;
    if (LCSSAPhi->getNumIncomingValues() == 1)
      LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()),
                            LoopMiddleBlock);
  }
}

void InnerLoopVectorizer::collectTriviallyDeadInstructions() {
  BasicBlock *Latch = OrigLoop->getLoopLatch();

  // We create new control-flow for the vectorized loop, so the original
  // condition will be dead after vectorization if it's only used by the
  // branch.
  auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  if (Cmp && Cmp->hasOneUse())
    DeadInstructions.insert(Cmp);

  // We create new "steps" for induction variable updates to which the original
  // induction variables map. An original update instruction will be dead if
  // all its users except the induction variable are dead.
  for (auto &Induction : *Legal->getInductionVars()) {
    PHINode *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
    if (all_of(IndUpdate->users(), [&](User *U) -> bool {
          return U == Ind || DeadInstructions.count(cast<Instruction>(U));
        }))
      DeadInstructions.insert(IndUpdate);
  }
}

void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {

  // The basic block and loop containing the predicated instruction.
  auto *PredBB = PredInst->getParent();
  auto *VectorLoop = LI->getLoopFor(PredBB);

  // Initialize a worklist with the operands of the predicated instruction.
  SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());

  // Holds instructions that we need to analyze again. An instruction may be
  // reanalyzed if we don't yet know whether we can sink it or not.
  SmallVector<Instruction *, 8> InstsToReanalyze;

  // Returns true if a given use occurs in the predicated block. Phi nodes use
  // their operands in their corresponding predecessor blocks.
  auto isBlockOfUsePredicated = [&](Use &U) -> bool {
    auto *I = cast<Instruction>(U.getUser());
    BasicBlock *BB = I->getParent();
    if (auto *Phi = dyn_cast<PHINode>(I))
      BB = Phi->getIncomingBlock(
          PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
    return BB == PredBB;
  };

  // Iteratively sink the scalarized operands of the predicated instruction
  // into the block we created for it. When an instruction is sunk, its
  // operands are then added to the worklist. The algorithm ends after one pass
  // through the worklist doesn't sink a single instruction.
  bool Changed;
  do {

    // Add the instructions that need to be reanalyzed to the worklist, and
    // reset the changed indicator.
    Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
    InstsToReanalyze.clear();
    Changed = false;

    while (!Worklist.empty()) {
      auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());

      // We can't sink an instruction if it is a phi node, is already in the
      // predicated block, is not in the loop, or may have side effects.
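      // (For example, an extractelement feeding only the predicated
      // instruction is a typical candidate for sinking, while a phi node or
      // an instruction with side effects is skipped by the check below.)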
      if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
          !VectorLoop->contains(I) || I->mayHaveSideEffects())
        continue;

      // It's legal to sink the instruction if all its uses occur in the
      // predicated block. Otherwise, there's nothing to do yet, and we may
      // need to reanalyze the instruction.
      if (!all_of(I->uses(), isBlockOfUsePredicated)) {
        InstsToReanalyze.push_back(I);
        continue;
      }

      // Move the instruction to the beginning of the predicated block, and add
      // its operands to the worklist.
      I->moveBefore(&*PredBB->getFirstInsertionPt());
      Worklist.insert(I->op_begin(), I->op_end());

      // The sinking may have enabled other instructions to be sunk, so we will
      // need to iterate.
      Changed = true;
    }
  } while (Changed);
}

void InnerLoopVectorizer::predicateInstructions() {

  // For each instruction I marked for predication on value C, split I into its
  // own basic block to form an if-then construct over C. Since I may be fed by
  // an extractelement instruction or other scalar operand, we try to
  // iteratively sink its scalar operands into the predicated block. If I feeds
  // an insertelement instruction, we try to move this instruction into the
  // predicated block as well. For non-void types, a phi node will be created
  // for the resulting value (either vector or scalar).
  //
  // So for some predicated instruction, e.g. the conditional sdiv in:
  //
  //   for.body:
  //     ...
  //     %add = add nsw i32 %mul, %0
  //     %cmp5 = icmp sgt i32 %2, 7
  //     br i1 %cmp5, label %if.then, label %if.end
  //
  //   if.then:
  //     %div = sdiv i32 %0, %1
  //     br label %if.end
  //
  //   if.end:
  //     %x.0 = phi i32 [ %div, %if.then ], [ %add, %for.body ]
  //
  // the sdiv at this point is scalarized and if-converted using a select.
  // The inactive elements in the vector are not used, but the predicated
  // instruction is still executed for all vector elements, essentially:
  //
  //   vector.body:
  //     ...
  //     %17 = add nsw <2 x i32> %16, %wide.load
  //     %29 = extractelement <2 x i32> %wide.load, i32 0
  //     %30 = extractelement <2 x i32> %wide.load51, i32 0
  //     %31 = sdiv i32 %29, %30
  //     %32 = insertelement <2 x i32> undef, i32 %31, i32 0
  //     %35 = extractelement <2 x i32> %wide.load, i32 1
  //     %36 = extractelement <2 x i32> %wide.load51, i32 1
  //     %37 = sdiv i32 %35, %36
  //     %38 = insertelement <2 x i32> %32, i32 %37, i32 1
  //     %predphi = select <2 x i1> %26, <2 x i32> %38, <2 x i32> %17
  //
  // Predication will now re-introduce the original control flow to avoid false
  // side-effects by the sdiv instructions on the inactive elements, yielding
  // (after cleanup):
  //
  //   vector.body:
  //     ...
  //     %5 = add nsw <2 x i32> %4, %wide.load
  //     %8 = icmp sgt <2 x i32> %wide.load52, <i32 7, i32 7>
  //     %9 = extractelement <2 x i1> %8, i32 0
  //     br i1 %9, label %pred.sdiv.if, label %pred.sdiv.continue
  //
  //   pred.sdiv.if:
  //     %10 = extractelement <2 x i32> %wide.load, i32 0
  //     %11 = extractelement <2 x i32> %wide.load51, i32 0
  //     %12 = sdiv i32 %10, %11
  //     %13 = insertelement <2 x i32> undef, i32 %12, i32 0
  //     br label %pred.sdiv.continue
  //
  //   pred.sdiv.continue:
  //     %14 = phi <2 x i32> [ undef, %vector.body ], [ %13, %pred.sdiv.if ]
  //     %15 = extractelement <2 x i1> %8, i32 1
  //     br i1 %15, label %pred.sdiv.if54, label %pred.sdiv.continue55
  //
  //   pred.sdiv.if54:
  //     %16 = extractelement <2 x i32> %wide.load, i32 1
  //     %17 = extractelement <2 x i32> %wide.load51, i32 1
  //     %18 = sdiv i32 %16, %17
  //     %19 = insertelement <2 x i32> %14, i32 %18, i32 1
  //     br label %pred.sdiv.continue55
  //
  //   pred.sdiv.continue55:
  //     %20 = phi <2 x i32> [ %14, %pred.sdiv.continue ], [ %19, %pred.sdiv.if54 ]
  //     %predphi = select <2 x i1> %8, <2 x i32> %20, <2 x i32> %5

  for (auto KV : PredicatedInstructions) {
    BasicBlock::iterator I(KV.first);
    BasicBlock *Head = I->getParent();
    auto *BB = SplitBlock(Head, &*std::next(I), DT, LI);
    auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false,
                                        /*BranchWeights=*/nullptr, DT, LI);
    I->moveBefore(T);
    sinkScalarOperands(&*I);

    I->getParent()->setName(Twine("pred.") + I->getOpcodeName() + ".if");
    BB->setName(Twine("pred.") + I->getOpcodeName() + ".continue");

    // If the instruction is non-void, create a phi node at the reconvergence
    // point.
    if (!I->getType()->isVoidTy()) {
      Value *IncomingTrue = nullptr;
      Value *IncomingFalse = nullptr;

      if (I->hasOneUse() && isa<InsertElementInst>(*I->user_begin())) {
        // If the predicated instruction is feeding an insert-element, move it
        // into the Then block; a phi node will be created for the vector.
        InsertElementInst *IEI = cast<InsertElementInst>(*I->user_begin());
        IEI->moveBefore(T);
        IncomingTrue = IEI;                 // The new vector with the inserted element.
        IncomingFalse = IEI->getOperand(0); // The unmodified vector.
      } else {
        // A phi node will be created for the scalar predicated instruction.
        IncomingTrue = &*I;
        IncomingFalse = UndefValue::get(I->getType());
      }

      BasicBlock *PostDom = I->getParent()->getSingleSuccessor();
      assert(PostDom && "Then block has multiple successors");
      PHINode *Phi =
          PHINode::Create(IncomingTrue->getType(), 2, "", &PostDom->front());
      IncomingTrue->replaceAllUsesWith(Phi);
      Phi->addIncoming(IncomingFalse, Head);
      Phi->addIncoming(IncomingTrue, I->getParent());
    }
  }

  DEBUG(DT->verifyDomTree());
}

InnerLoopVectorizer::VectorParts
InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
  assert(is_contained(predecessors(Dst), Src) && "Invalid edge");

  // Look for cached value.
  std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
  EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge);
  if (ECEntryIt != MaskCache.end())
    return ECEntryIt->second;

  VectorParts SrcMask = createBlockInMask(Src);

  // The terminator has to be a branch inst!
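  // Conceptually, the mask computed below for an edge (Src, Dst) is:
  //
  //   EdgeMask = SrcMask & Cond     if Dst is the true successor,
  //   EdgeMask = SrcMask & ~Cond    if Dst is the false successor,
  //   EdgeMask = SrcMask            if the branch is unconditional.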
  BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
  assert(BI && "Unexpected terminator found");

  if (BI->isConditional()) {
    VectorParts EdgeMask = getVectorValue(BI->getCondition());

    if (BI->getSuccessor(0) != Dst)
      for (unsigned part = 0; part < UF; ++part)
        EdgeMask[part] = Builder.CreateNot(EdgeMask[part]);

    for (unsigned part = 0; part < UF; ++part)
      EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]);

    MaskCache[Edge] = EdgeMask;
    return EdgeMask;
  }

  MaskCache[Edge] = SrcMask;
  return SrcMask;
}

InnerLoopVectorizer::VectorParts
InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
  assert(OrigLoop->contains(BB) && "Block is not a part of a loop");

  // The loop incoming mask is all-one.
  if (OrigLoop->getHeader() == BB) {
    Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1);
    return getVectorValue(C);
  }

  // This is the block mask. We OR all incoming edge masks, starting with zero.
  Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0);
  VectorParts BlockMask = getVectorValue(Zero);

  // For each predecessor, OR in its edge mask.
  for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) {
    VectorParts EM = createEdgeMask(*it, BB);
    for (unsigned part = 0; part < UF; ++part)
      BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]);
  }

  return BlockMask;
}

void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
                                              unsigned VF, PhiVector *PV) {
  PHINode *P = cast<PHINode>(PN);
  // Handle recurrences.
  if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
    VectorParts Entry(UF);
    for (unsigned part = 0; part < UF; ++part) {
      // This is phase one of vectorizing PHIs.
      Type *VecTy =
          (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
      Entry[part] = PHINode::Create(
          VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
    }
    VectorLoopValueMap.initVector(P, Entry);
    PV->push_back(P);
    return;
  }

  setDebugLocFromInst(Builder, P);
  // Check for PHI nodes that are lowered to vector selects.
  if (P->getParent() != OrigLoop->getHeader()) {
    // We know that all PHIs in non-header blocks are converted into
    // selects, so we don't have to worry about the insertion order and we
    // can just use the builder.
    // At this point we generate the predication tree. There may be
    // duplications since this is a simple recursive scan, but future
    // optimizations will clean it up.

    unsigned NumIncoming = P->getNumIncomingValues();

    // Generate a sequence of selects of the form:
    //   SELECT(Mask3, In3,
    //          SELECT(Mask2, In2,
    //                 ( ...)))
    VectorParts Entry(UF);
    for (unsigned In = 0; In < NumIncoming; In++) {
      VectorParts Cond =
          createEdgeMask(P->getIncomingBlock(In), P->getParent());
      const VectorParts &In0 = getVectorValue(P->getIncomingValue(In));

      for (unsigned part = 0; part < UF; ++part) {
        // We might have single-edge PHIs (blocks) - use an identity
        // 'select' for the first PHI operand.
        if (In == 0)
          Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]);
        else
          // Select between the current value and the previous incoming edge
          // based on the incoming mask.
          Entry[part] = Builder.CreateSelect(Cond[part], In0[part],
                                             Entry[part], "predphi");
      }
    }
    VectorLoopValueMap.initVector(P, Entry);
    return;
  }

  // This PHINode must be an induction variable.
  // Make sure that we know about it.
  assert(Legal->getInductionVars()->count(P) && "Not an induction variable");

  InductionDescriptor II = Legal->getInductionVars()->lookup(P);
  const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();

  // FIXME: The newly created binary instructions should contain nsw/nuw flags,
  // which can be found from the original scalar operations.
  switch (II.getKind()) {
  case InductionDescriptor::IK_NoInduction:
    llvm_unreachable("Unknown induction");
  case InductionDescriptor::IK_IntInduction:
    return widenIntInduction(P);
  case InductionDescriptor::IK_PtrInduction: {
    // Handle the pointer induction variable case.
    assert(P->getType()->isPointerTy() && "Unexpected type.");
    // This is the normalized GEP that starts counting at zero.
    Value *PtrInd = Induction;
    PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
    // Determine the number of scalars we need to generate for each unroll
    // iteration. If the instruction is uniform, we only need to generate the
    // first lane. Otherwise, we generate all VF values.
    unsigned Lanes = Legal->isUniformAfterVectorization(P) ? 1 : VF;
    // These are the scalar results. Notice that we don't generate vector GEPs
    // because scalar GEPs result in better code.
    ScalarParts Entry(UF);
    for (unsigned Part = 0; Part < UF; ++Part) {
      Entry[Part].resize(VF);
      for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
        Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
        Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
        Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
        SclrGep->setName("next.gep");
        Entry[Part][Lane] = SclrGep;
      }
    }
    VectorLoopValueMap.initScalar(P, Entry);
    return;
  }
  case InductionDescriptor::IK_FpInduction: {
    assert(P->getType() == II.getStartValue()->getType() &&
           "Types must match");
    // Handle other induction variables that are now based on the
    // canonical one.
    assert(P != OldInduction && "Primary induction can be integer only");

    Value *V = Builder.CreateCast(Instruction::SIToFP, Induction, P->getType());
    V = II.transform(Builder, V, PSE.getSE(), DL);
    V->setName("fp.offset.idx");

    // Now we have the scalar operation:
    //   %fp.offset.idx = StartVal +/- Induction * StepVal

    Value *Broadcasted = getBroadcastInstrs(V);
    // After broadcasting the induction variable we need to make the vector
    // consecutive by adding StepVal*0, StepVal*1, StepVal*2, etc.
    Value *StepVal = cast<SCEVUnknown>(II.getStep())->getValue();
    VectorParts Entry(UF);
    for (unsigned part = 0; part < UF; ++part)
      Entry[part] = getStepVector(Broadcasted, VF * part, StepVal,
                                  II.getInductionOpcode());
    VectorLoopValueMap.initVector(P, Entry);
    return;
  }
  }
}

/// A helper function for checking whether an integer division-related
/// instruction may divide by zero (in which case it must be predicated if
/// executed conditionally in the scalar code).
/// TODO: It may be worthwhile to generalize and check isKnownNonZero().
/// Non-zero divisors that are not compile-time constants will not be
/// converted into multiplication, so we will still end up scalarizing
/// the division, but can do so without predication.
static bool mayDivideByZero(Instruction &I) {
  assert((I.getOpcode() == Instruction::UDiv ||
          I.getOpcode() == Instruction::SDiv ||
          I.getOpcode() == Instruction::URem ||
          I.getOpcode() == Instruction::SRem) &&
         "Unexpected instruction");
  Value *Divisor = I.getOperand(1);
  auto *CInt = dyn_cast<ConstantInt>(Divisor);
  return !CInt || CInt->isZero();
}

void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
  // For each instruction in the old loop.
  for (Instruction &I : *BB) {

    // If the instruction will become trivially dead when vectorized, we don't
    // need to generate it.
    if (DeadInstructions.count(&I))
      continue;

    // Scalarize instructions that should remain scalar after vectorization.
    if (!(isa<BranchInst>(&I) || isa<PHINode>(&I) ||
          isa<DbgInfoIntrinsic>(&I)) &&
        Legal->isScalarAfterVectorization(&I)) {
      scalarizeInstruction(&I);
      continue;
    }

    switch (I.getOpcode()) {
    case Instruction::Br:
      // Nothing to do for PHIs and BR, since we already took care of the
      // loop control flow instructions.
      continue;
    case Instruction::PHI: {
      // Vectorize PHINodes.
      widenPHIInstruction(&I, UF, VF, PV);
      continue;
    } // End of PHI.

    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::URem:
      // Scalarize with predication if this instruction may divide by zero and
      // block execution is conditional; otherwise fall through to the
      // binary-operator widening below.
      if (Legal->isScalarWithPredication(&I)) {
        scalarizeInstruction(&I, true);
        continue;
      }
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      // Just widen binops.
      auto *BinOp = cast<BinaryOperator>(&I);
      setDebugLocFromInst(Builder, BinOp);
      const VectorParts &A = getVectorValue(BinOp->getOperand(0));
      const VectorParts &B = getVectorValue(BinOp->getOperand(1));

      // Use this vector value for all users of the original instruction.
      VectorParts Entry(UF);
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]);

        if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
          VecOp->copyIRFlags(BinOp);

        Entry[Part] = V;
      }

      VectorLoopValueMap.initVector(&I, Entry);
      addMetadata(Entry, BinOp);
      break;
    }
    case Instruction::Select: {
      // Widen selects.
      // If the selector is loop invariant, we can create a select
      // instruction with a scalar condition. Otherwise, use a vector select.
      auto *SE = PSE.getSE();
      bool InvariantCond =
          SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
      setDebugLocFromInst(Builder, &I);

      // The condition can be loop invariant but still defined inside the
      // loop. This means that we can't just use the original 'cond' value.
      // We have to take the 'vectorized' value and pick the first lane.
      // Instcombine will make this a no-op.
      const VectorParts &Cond = getVectorValue(I.getOperand(0));
      const VectorParts &Op0 = getVectorValue(I.getOperand(1));
      const VectorParts &Op1 = getVectorValue(I.getOperand(2));

      auto *ScalarCond = getScalarValue(I.getOperand(0), 0, 0);

      VectorParts Entry(UF);
      for (unsigned Part = 0; Part < UF; ++Part) {
        Entry[Part] = Builder.CreateSelect(
            InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]);
      }

      VectorLoopValueMap.initVector(&I, Entry);
      addMetadata(Entry, &I);
      break;
    }

    case Instruction::ICmp:
    case Instruction::FCmp: {
      // Widen compares. Generate vector compares.
      bool FCmp = (I.getOpcode() == Instruction::FCmp);
      auto *Cmp = dyn_cast<CmpInst>(&I);
      setDebugLocFromInst(Builder, Cmp);
      const VectorParts &A = getVectorValue(Cmp->getOperand(0));
      const VectorParts &B = getVectorValue(Cmp->getOperand(1));
      VectorParts Entry(UF);
      for (unsigned Part = 0; Part < UF; ++Part) {
        Value *C = nullptr;
        if (FCmp) {
          C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
          cast<FCmpInst>(C)->copyFastMathFlags(Cmp);
        } else {
          C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]);
        }
        Entry[Part] = C;
      }

      VectorLoopValueMap.initVector(&I, Entry);
      addMetadata(Entry, &I);
      break;
    }

    case Instruction::Store:
    case Instruction::Load:
      vectorizeMemoryInstruction(&I);
      break;
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      auto *CI = dyn_cast<CastInst>(&I);
      setDebugLocFromInst(Builder, CI);

      // Optimize the special case where the source is a constant integer
      // induction variable. Notice that we can only optimize the 'trunc' case
      // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
      // (c) other casts depend on pointer size.
      auto ID = Legal->getInductionVars()->lookup(OldInduction);
      if (isa<TruncInst>(CI) && CI->getOperand(0) == OldInduction &&
          ID.getConstIntStepValue()) {
        widenIntInduction(OldInduction, cast<TruncInst>(CI));
        break;
      }

      // Vectorize casts.
      Type *DestTy =
          (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);

      const VectorParts &A = getVectorValue(CI->getOperand(0));
      VectorParts Entry(UF);
      for (unsigned Part = 0; Part < UF; ++Part)
        Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy);
      VectorLoopValueMap.initVector(&I, Entry);
      addMetadata(Entry, &I);
      break;
    }

    case Instruction::Call: {
      // Ignore dbg intrinsics.
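      // (They only carry debug locations for scalar values, so no vector form
      // is generated for them.)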
      if (isa<DbgInfoIntrinsic>(I))
        break;
      setDebugLocFromInst(Builder, &I);

      Module *M = BB->getParent()->getParent();
      auto *CI = cast<CallInst>(&I);

      StringRef FnName = CI->getCalledFunction()->getName();
      Function *F = CI->getCalledFunction();
      Type *RetTy = ToVectorTy(CI->getType(), VF);
      SmallVector<Type *, 4> Tys;
      for (Value *ArgOperand : CI->arg_operands())
        Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));

      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
                 ID == Intrinsic::lifetime_start)) {
        scalarizeInstruction(&I);
        break;
      }
      // Decide whether to use an intrinsic or the vector version of a library
      // call for the vectorized instruction: is the intrinsic call cheaper
      // than the library call?
      bool NeedToScalarize;
      unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
      bool UseVectorIntrinsic =
          ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
      if (!UseVectorIntrinsic && NeedToScalarize) {
        scalarizeInstruction(&I);
        break;
      }

      VectorParts Entry(UF);
      for (unsigned Part = 0; Part < UF; ++Part) {
        SmallVector<Value *, 4> Args;
        for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
          Value *Arg = CI->getArgOperand(i);
          // Some intrinsics have a scalar argument - don't replace it with a
          // vector.
          if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) {
            const VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i));
            Arg = VectorArg[Part];
          }
          Args.push_back(Arg);
        }

        Function *VectorF;
        if (UseVectorIntrinsic) {
          // Use the vector version of the intrinsic.
          Type *TysForDecl[] = {CI->getType()};
          if (VF > 1)
            TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
          VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
        } else {
          // Use the vector version of the library call.
          StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
          assert(!VFnName.empty() && "Vector function name is empty.");
          VectorF = M->getFunction(VFnName);
          if (!VectorF) {
            // Generate a declaration.
            FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
            VectorF =
                Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
            VectorF->copyAttributesFrom(F);
          }
        }
        assert(VectorF && "Can't create vector function.");

        SmallVector<OperandBundleDef, 1> OpBundles;
        CI->getOperandBundlesAsDefs(OpBundles);
        CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);

        if (isa<FPMathOperator>(V))
          V->copyFastMathFlags(CI);

        Entry[Part] = V;
      }

      VectorLoopValueMap.initVector(&I, Entry);
      addMetadata(Entry, &I);
      break;
    }

    default:
      // All other instructions are unsupported. Scalarize them.
      scalarizeInstruction(&I);
      break;
    } // end of switch.
  } // end of for_each instr.
}

void InnerLoopVectorizer::updateAnalysis() {
  // Forget the original basic block.
  PSE.getSE()->forgetLoop(OrigLoop);

  // Update the dominator tree information.
  assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
         "Entry does not dominate exit.");

  // We don't predicate stores by this point, so the vector body should be a
  // single loop.
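  // The updates below make the dominator tree reflect the new control flow
  // (an illustrative sketch of the immediate-dominator relations created):
  //
  //   LoopVectorPreHeader  idom of  LoopVectorBody
  //   LoopVectorBody       idom of  LoopMiddleBlock
  //   LoopBypassBlocks[0]  idom of  LoopScalarPreHeader and LoopExitBlock
  //   LoopScalarPreHeader  idom of  LoopScalarBody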
  DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader);

  DT->addNewBlock(LoopMiddleBlock, LoopVectorBody);
  DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
  DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
  DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);

  DEBUG(DT->verifyDomTree());
}

/// \brief Check whether it is safe to if-convert this phi node.
///
/// Phi nodes with constant expressions that can trap are not safe to
/// if-convert.
static bool canIfConvertPHINodes(BasicBlock *BB) {
  for (Instruction &I : *BB) {
    auto *Phi = dyn_cast<PHINode>(&I);
    if (!Phi)
      return true;
    for (Value *V : Phi->incoming_values())
      if (auto *C = dyn_cast<Constant>(V))
        if (C->canTrap())
          return false;
  }
  return true;
}

bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
  if (!EnableIfConversion) {
    ORE->emit(createMissedAnalysis("IfConversionDisabled")
              << "if-conversion is disabled");
    return false;
  }

  assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");

  // A list of pointers that we can safely read from and write to.
  SmallPtrSet<Value *, 8> SafePointers;

  // Collect safe addresses.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (blockNeedsPredication(BB))
      continue;

    for (Instruction &I : *BB)
      if (auto *Ptr = getPointerOperand(&I))
        SafePointers.insert(Ptr);
  }

  // Collect the blocks that need predication.
  BasicBlock *Header = TheLoop->getHeader();
  for (BasicBlock *BB : TheLoop->blocks()) {
    // We don't support switch statements inside loops.
    if (!isa<BranchInst>(BB->getTerminator())) {
      ORE->emit(createMissedAnalysis("LoopContainsSwitch", BB->getTerminator())
                << "loop contains a switch statement");
      return false;
    }

    // We must be able to predicate all blocks that need to be predicated.
    if (blockNeedsPredication(BB)) {
      if (!blockCanBePredicated(BB, SafePointers)) {
        ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
                  << "control flow cannot be substituted for a select");
        return false;
      }
    } else if (BB != Header && !canIfConvertPHINodes(BB)) {
      ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
                << "control flow cannot be substituted for a select");
      return false;
    }
  }

  // We can if-convert this loop.
  return true;
}

bool LoopVectorizationLegality::canVectorize() {
  // We must have a loop in canonical form. Loops with indirectbr in them cannot
  // be canonicalized.
  if (!TheLoop->getLoopPreheader()) {
    ORE->emit(createMissedAnalysis("CFGNotUnderstood")
              << "loop control flow is not understood by vectorizer");
    return false;
  }

  // FIXME: This code is currently dead, since any loop that gets sent to
  // LoopVectorizationLegality is already an innermost loop.
  //
  // We can only vectorize innermost loops.
  if (!TheLoop->empty()) {
    ORE->emit(createMissedAnalysis("NotInnermostLoop")
              << "loop is not the innermost loop");
    return false;
  }

  // We must have a single backedge.
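  // (A single backedge implies a single latch; loop-simplify normally merges
  // multiple backedges into one, so this mainly rejects non-canonical loops.)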
  if (TheLoop->getNumBackEdges() != 1) {
    ORE->emit(createMissedAnalysis("CFGNotUnderstood")
              << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    ORE->emit(createMissedAnalysis("CFGNotUnderstood")
              << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    ORE->emit(createMissedAnalysis("CFGNotUnderstood")
              << "loop control flow is not understood by vectorizer");
    return false;
  }

  // We need to have a loop header.
  DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
               << '\n');

  // Check if we can if-convert non-single-bb loops.
  unsigned NumBlocks = TheLoop->getNumBlocks();
  if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
    DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE.getBackedgeTakenCount();
  if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
    ORE->emit(createMissedAnalysis("CantComputeNumberOfIterations")
              << "could not determine number of loop iterations");
    DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
    return false;
  }

  // Check if we can vectorize the instructions and CFG in this loop.
  if (!canVectorizeInstrs()) {
    DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
    return false;
  }

  // Go over each instruction and look at memory deps.
  if (!canVectorizeMemory()) {
    DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
    return false;
  }

  DEBUG(dbgs() << "LV: We can vectorize this loop"
               << (LAI->getRuntimePointerChecking()->Need
                       ? " (with a runtime bound check)"
                       : "")
               << "!\n");

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved)
    InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());

  // Collect all instructions that are known to be uniform after vectorization.
  collectLoopUniforms();

  // Collect all instructions that are known to be scalar after vectorization.
  collectLoopScalars();

  unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
  if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
    SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;

  if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
    ORE->emit(createMissedAnalysis("TooManySCEVRunTimeChecks")
              << "Too many SCEV assumptions need to be made and checked "
              << "at runtime");
    DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
    return false;
  }

  // Okay! We can vectorize.
  // At this point we don't have any other mem analysis which may limit our
  // maximum vectorization factor, so just return true with no restrictions.
  return true;
}

static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
  if (Ty->isPointerTy())
    return DL.getIntPtrType(Ty);

  // It is possible that chars or shorts overflow when we ask for the loop's
  // trip count, so work around this by widening the type.
  if (Ty->getScalarSizeInBits() < 32)
    return Type::getInt32Ty(Ty->getContext());

  return Ty;
}

static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
  Ty0 = convertPointerToIntegerType(DL, Ty0);
  Ty1 = convertPointerToIntegerType(DL, Ty1);
  if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
    return Ty0;
  return Ty1;
}

/// \brief Check that the instruction has outside loop users and is not an
/// identified reduction variable.
static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
                               SmallPtrSetImpl<Value *> &AllowedExit) {
  // Reduction and induction instructions are allowed to have exit users. All
  // other instructions must not have external users.
  if (!AllowedExit.count(Inst))
    // Check that all of the users of the instruction are inside the loop.
    for (User *U : Inst->users()) {
      Instruction *UI = cast<Instruction>(U);
      // This user may be a reduction exit value.
      if (!TheLoop->contains(UI)) {
        DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
        return true;
      }
    }
  return false;
}

void LoopVectorizationLegality::addInductionPhi(
    PHINode *Phi, const InductionDescriptor &ID,
    SmallPtrSetImpl<Value *> &AllowedExit) {
  Inductions[Phi] = ID;
  Type *PhiTy = Phi->getType();
  const DataLayout &DL = Phi->getModule()->getDataLayout();

  // Get the widest type.
  if (!PhiTy->isFloatingPointTy()) {
    if (!WidestIndTy)
      WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
    else
      WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
  }

  // Int inductions are special because we only allow one IV.
  if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
      ID.getConstIntStepValue() && ID.getConstIntStepValue()->isOne() &&
      isa<Constant>(ID.getStartValue()) &&
      cast<Constant>(ID.getStartValue())->isNullValue()) {

    // Use the phi node with the widest type as induction. Use the last
    // one if there are multiple (no good reason for doing this other
    // than it is expedient). We've checked that it begins at zero and
    // steps by one, so this is a canonical induction variable.
    if (!Induction || PhiTy == WidestIndTy)
      Induction = Phi;
  }

  // Both the PHI node itself, and the "post-increment" value feeding
  // back into the PHI node may have external users.
  AllowedExit.insert(Phi);
  AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));

  DEBUG(dbgs() << "LV: Found an induction variable.\n");
}

bool LoopVectorizationLegality::canVectorizeInstrs() {
  BasicBlock *Header = TheLoop->getHeader();

  // Look for the attribute signaling the absence of NaNs.
  Function &F = *Header->getParent();
  HasFunNoNaNAttr =
      F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
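  // The scan below classifies each header phi, in order, as: a reduction, an
  // induction, a first-order recurrence, or (as a last resort) an induction
  // coerced from an AddRec expression; any phi that matches none of these
  // blocks vectorization.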
  // For each block in the loop.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the instructions in the block and look for hazards.
    for (Instruction &I : *BB) {
      if (auto *Phi = dyn_cast<PHINode>(&I)) {
        Type *PhiTy = Phi->getType();
        // Check that this PHI type is allowed.
        if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
            !PhiTy->isPointerTy()) {
          ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
                    << "loop control flow is not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found a non-int, non-pointer PHI.\n");
          return false;
        }

        // If this PHINode is not in the header block, then we know that we
        // can convert it to a select during if-conversion. No need to check
        // if the PHIs in this block are induction or reduction variables.
        if (BB != Header) {
          // Check that this instruction has no outside users or is an
          // identified reduction value with an outside user.
          if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
            continue;
          ORE->emit(createMissedAnalysis("NeitherInductionNorReduction", Phi)
                    << "value could not be identified as "
                       "an induction or reduction variable");
          return false;
        }

        // We only allow if-converted PHIs with exactly two incoming values.
        if (Phi->getNumIncomingValues() != 2) {
          ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
                    << "control flow not understood by vectorizer");
          DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
          return false;
        }

        RecurrenceDescriptor RedDes;
        if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
          if (RedDes.hasUnsafeAlgebra())
            Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
          AllowedExit.insert(RedDes.getLoopExitInstr());
          Reductions[Phi] = RedDes;
          continue;
        }

        InductionDescriptor ID;
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
          addInductionPhi(Phi, ID, AllowedExit);
          if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
            Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
          continue;
        }

        if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) {
          FirstOrderRecurrences.insert(Phi);
          continue;
        }

        // As a last resort, coerce the PHI to an AddRec expression
        // and re-try classifying it as an induction PHI.
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
          addInductionPhi(Phi, ID, AllowedExit);
          continue;
        }

        ORE->emit(createMissedAnalysis("NonReductionValueUsedOutsideLoop", Phi)
                  << "value that could not be identified as "
                     "reduction is used outside the loop");
        DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
        return false;
      } // end of PHI handling

      // We handle calls that:
      //   * Are debug info intrinsics.
      //   * Have a mapping to an IR intrinsic.
      //   * Have a vector version available.
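      // (For example, a call to the C library function fabs typically maps to
      // the llvm.fabs intrinsic, while other calls may have a vector variant
      // registered in TLI; the exact mappings depend on TargetLibraryInfo.)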
      auto *CI = dyn_cast<CallInst>(&I);
      if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
          !isa<DbgInfoIntrinsic>(CI) &&
          !(CI->getCalledFunction() && TLI &&
            TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
        ORE->emit(createMissedAnalysis("CantVectorizeCall", CI)
                  << "call instruction cannot be vectorized");
        DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
        return false;
      }

      // Intrinsics such as powi, cttz, and ctlz are legal to vectorize if the
      // second argument is the same for every iteration (i.e. loop invariant).
      if (CI && hasVectorInstrinsicScalarOpd(
                    getVectorIntrinsicIDForCall(CI, TLI), 1)) {
        auto *SE = PSE.getSE();
        if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
          ORE->emit(createMissedAnalysis("CantVectorizeIntrinsic", CI)
                    << "intrinsic instruction cannot be vectorized");
          DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
          return false;
        }
      }

      // Check that the instruction return type is vectorizable.
      // Also, we can't vectorize extractelement instructions.
      if ((!VectorType::isValidElementType(I.getType()) &&
           !I.getType()->isVoidTy()) ||
          isa<ExtractElementInst>(I)) {
        ORE->emit(createMissedAnalysis("CantVectorizeInstructionReturnType", &I)
                  << "instruction return type cannot be vectorized");
        DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
        return false;
      }

      // Check that the stored type is vectorizable.
      if (auto *ST = dyn_cast<StoreInst>(&I)) {
        Type *T = ST->getValueOperand()->getType();
        if (!VectorType::isValidElementType(T)) {
          ORE->emit(createMissedAnalysis("CantVectorizeStore", ST)
                    << "store instruction cannot be vectorized");
          return false;
        }

        // FP instructions can allow unsafe algebra, thus vectorizable by
        // non-IEEE-754 compliant SIMD units.
        // This applies to floating-point math operations and calls, not memory
        // operations, shuffles, or casts, as they don't change precision or
        // semantics.
      } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
                 !I.hasUnsafeAlgebra()) {
        DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
        Hints->setPotentiallyUnsafe();
      }

      // Reduction instructions are allowed to have exit users.
      // All other instructions must not have external users.
      if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
        ORE->emit(createMissedAnalysis("ValueUsedOutsideLoop", &I)
                  << "value cannot be used outside the loop");
        return false;
      }

    } // next instr.
  }

  if (!Induction) {
    DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
    if (Inductions.empty()) {
      ORE->emit(createMissedAnalysis("NoInductionVariable")
                << "loop induction variable could not be identified");
      return false;
    }
  }

  // Now that we know the widest induction type, check if our found induction
  // is the same size. If it's not, unset it here and InnerLoopVectorizer
  // will create another.
  if (Induction && WidestIndTy != Induction->getType())
    Induction = nullptr;

  return true;
}

void LoopVectorizationLegality::collectLoopScalars() {

  // If an instruction is uniform after vectorization, it will remain scalar.
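  // (Uniform instructions are generated only for the first vector lane rather
  // than widened, so every instruction known to be uniform is also scalar.)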
  Scalars.insert(Uniforms.begin(), Uniforms.end());

  // Collect the getelementptr instructions that will not be vectorized. A
  // getelementptr instruction is only vectorized if it is used for a legal
  // gather or scatter operation.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
        Scalars.insert(GEP);
        continue;
      }
      auto *Ptr = getPointerOperand(&I);
      if (!Ptr)
        continue;
      auto *GEP = getGEPInstruction(Ptr);
      if (GEP && isLegalGatherOrScatter(&I))
        Scalars.erase(GEP);
    }

  // An induction variable will remain scalar if all users of the induction
  // variable and induction variable update remain scalar.
  auto *Latch = TheLoop->getLoopLatch();
  for (auto &Induction : *getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are scalar after
    // vectorization.
    auto ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Scalars.count(I);
    });
    if (!ScalarInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // scalar after vectorization.
    auto ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == Ind || !TheLoop->contains(I) || Scalars.count(I);
    });
    if (!ScalarIndUpdate)
      continue;

    // The induction variable and its update instruction will remain scalar.
    Scalars.insert(Ind);
    Scalars.insert(IndUpdate);
  }
}

bool LoopVectorizationLegality::hasConsecutiveLikePtrOperand(Instruction *I) {
  if (isAccessInterleaved(I))
    return true;
  if (auto *Ptr = getPointerOperand(I))
    return isConsecutivePtr(Ptr);
  return false;
}

bool LoopVectorizationLegality::isScalarWithPredication(Instruction *I) {
  if (!blockNeedsPredication(I->getParent()))
    return false;
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Store:
    return !isMaskRequired(I);
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::SRem:
  case Instruction::URem:
    return mayDivideByZero(*I);
  }
  return false;
}

bool LoopVectorizationLegality::memoryInstructionMustBeScalarized(
    Instruction *I, unsigned VF) {

  // If the memory instruction is in an interleaved group, it will be
  // vectorized and its pointer will remain uniform.
  if (isAccessInterleaved(I))
    return false;

  // Get and ensure we have a valid memory instruction.
  LoadInst *LI = dyn_cast<LoadInst>(I);
  StoreInst *SI = dyn_cast<StoreInst>(I);
  assert((LI || SI) && "Invalid memory instruction");

  // If the pointer operand is uniform (loop invariant), the memory instruction
  // will be scalarized.
  auto *Ptr = getPointerOperand(I);
  if (LI && isUniform(Ptr))
    return true;

  // If the pointer operand is non-consecutive and neither a gather nor a
  // scatter operation is legal, the memory instruction will be scalarized.
  if (!isConsecutivePtr(Ptr) && !isLegalGatherOrScatter(I))
    return true;

  // If the instruction is a store located in a predicated block, it will be
  // scalarized.
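  // (Note that isScalarWithPredication also covers conditionally executed
  // divisions and remainders that may divide by zero; see mayDivideByZero
  // above.)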
  if (isScalarWithPredication(I))
    return true;

  // If the instruction's allocated size doesn't equal its type size, it
  // requires padding and will be scalarized.
  auto &DL = I->getModule()->getDataLayout();
  auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  if (hasIrregularType(ScalarTy, DL, VF))
    return true;

  // Otherwise, the memory instruction should be vectorized if the rest of the
  // loop is.
  return false;
}

void LoopVectorizationLegality::collectLoopUniforms() {
  // We now know that the loop is vectorizable!
  // Collect instructions inside the loop that will remain uniform after
  // vectorization.

  // Global values, params and instructions outside of the current loop are
  // out of scope.
  auto isOutOfScope = [&](Value *V) -> bool {
    Instruction *I = dyn_cast<Instruction>(V);
    return (!I || !TheLoop->contains(I));
  };

  SetVector<Instruction *> Worklist;
  BasicBlock *Latch = TheLoop->getLoopLatch();

  // Start with the conditional branch. If the branch condition is an
  // instruction contained in the loop that is only used by the branch, it is
  // uniform.
  auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) {
    Worklist.insert(Cmp);
    DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
  }

  // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
  // are pointers that are treated like consecutive pointers during
  // vectorization. The pointer operands of interleaved accesses are an
  // example.
  SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;

  // Holds pointer operands of instructions that are possibly non-uniform.
  SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;

  // Iterate over the instructions in the loop, and collect all
  // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
  // that a consecutive-like pointer operand will be scalarized, we collect it
  // in PossibleNonUniformPtrs instead. We use two sets here because a single
  // getelementptr instruction can be used by both vectorized and scalarized
  // memory instructions. For example, if a loop loads and stores from the same
  // location, but the store is conditional, the store will be scalarized, and
  // the getelementptr won't remain uniform.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {

      // If there's no pointer operand, there's nothing to do.
      auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I));
      if (!Ptr)
        continue;

      // True if all users of Ptr are memory accesses that have Ptr as their
      // pointer operand.
      auto UsersAreMemAccesses = all_of(Ptr->users(), [&](User *U) -> bool {
        return getPointerOperand(U) == Ptr;
      });

      // Ensure the memory instruction will not be scalarized, making its
      // pointer operand non-uniform. If the pointer operand is used by some
      // instruction other than a memory access, we're not going to check if
      // that other instruction may be scalarized here. Thus, conservatively
      // assume the pointer operand may be non-uniform.
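      // (E.g., a pointer that also feeds a non-memory user, such as a
      // pointer comparison or a pointer-to-integer cast, lands in
      // PossibleNonUniformPtrs; illustrative examples.)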
      if (!UsersAreMemAccesses || memoryInstructionMustBeScalarized(&I))
        PossibleNonUniformPtrs.insert(Ptr);

      // If the memory instruction will be vectorized and its pointer operand
      // is consecutive-like, the pointer operand should remain uniform.
      else if (hasConsecutiveLikePtrOperand(&I))
        ConsecutiveLikePtrs.insert(Ptr);
    }

  // Add to the Worklist all consecutive and consecutive-like pointers that
  // aren't also identified as possibly non-uniform.
  for (auto *V : ConsecutiveLikePtrs)
    if (!PossibleNonUniformPtrs.count(V)) {
      DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
      Worklist.insert(V);
    }

  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should be either already inside Worklist, or
  // out of scope. This ensures a uniform instruction will only be used
  // by uniform instructions or out-of-scope instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      if (isOutOfScope(OV))
        continue;
      auto *OI = cast<Instruction>(OV);
      if (all_of(OI->users(), [&](User *U) -> bool {
            return isOutOfScope(U) || Worklist.count(cast<Instruction>(U));
          })) {
        Worklist.insert(OI);
        DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
      }
    }
  }

  // Returns true if Ptr is the pointer operand of a memory access instruction
  // I, and I is known to not require scalarization.
  auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
    return getPointerOperand(I) == Ptr && !memoryInstructionMustBeScalarized(I);
  };

  // For an instruction to be added into Worklist above, all its users inside
  // the loop should also be in Worklist. However, this condition cannot be
  // true for phi nodes that form a cyclic dependence. We must process phi
  // nodes separately. An induction variable will remain uniform if all users
  // of the induction variable and induction variable update remain uniform.
  // The code below handles both pointer and non-pointer induction variables.
  for (auto &Induction : Inductions) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are uniform after
    // vectorization.
    auto UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, Ind);
    });
    if (!UniformInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // uniform after vectorization.
    auto UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, IndUpdate);
    });
    if (!UniformIndUpdate)
      continue;

    // The induction variable and its update instruction will remain uniform.
    Worklist.insert(Ind);
    Worklist.insert(IndUpdate);
    DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n");
    DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n");
  }

  Uniforms.insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationLegality::canVectorizeMemory() {
  LAI = &(*GetLAA)(*TheLoop);
  InterleaveInfo.setLAI(LAI);
  const OptimizationRemarkAnalysis *LAR = LAI->getReport();
  if (LAR) {
    OptimizationRemarkAnalysis VR(Hints->vectorizeAnalysisPassName(),
                                  "loop not vectorized: ", *LAR);
    ORE->emit(VR);
  }
  if (!LAI->canVectorizeMemory())
    return false;

  if (LAI->hasStoreToLoopInvariantAddress()) {
    ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress")
              << "write to a loop invariant address could not be vectorized");
    DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
    return false;
  }

  Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
  PSE.addPredicate(LAI->getPSE().getUnionPredicate());

  return true;
}

bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
  Value *In0 = const_cast<Value *>(V);
  PHINode *PN = dyn_cast_or_null<PHINode>(In0);
  if (!PN)
    return false;

  return Inductions.count(PN);
}

bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
  return FirstOrderRecurrences.count(Phi);
}

bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
  return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
}

bool LoopVectorizationLegality::blockCanBePredicated(
    BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  for (Instruction &I : *BB) {
    // Check that we don't have a constant expression that can trap as an
    // operand.
    for (Value *Operand : I.operands()) {
      if (auto *C = dyn_cast<Constant>(Operand))
        if (C->canTrap())
          return false;
    }
    // We might be able to hoist the load.
    if (I.mayReadFromMemory()) {
      auto *LI = dyn_cast<LoadInst>(&I);
      if (!LI)
        return false;
      if (!SafePtrs.count(LI->getPointerOperand())) {
        if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) ||
            isLegalMaskedGather(LI->getType())) {
          MaskedOp.insert(LI);
          continue;
        }
        // !llvm.mem.parallel_loop_access implies if-conversion safety.
        if (IsAnnotatedParallel)
          continue;
        return false;
      }
    }

    if (I.mayWriteToMemory()) {
      auto *SI = dyn_cast<StoreInst>(&I);
      // We only support predication of stores in basic blocks with one
      // predecessor.
      if (!SI)
        return false;

      // Build a masked store if it is legal for the target.
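      // Otherwise, the store can only be scalarized and predicated, which is
      // allowed for at most NumberOfStoresToPredicate stores and requires a
      // safe pointer and a single-predecessor block, as checked below.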
      if (isLegalMaskedStore(SI->getValueOperand()->getType(),
                             SI->getPointerOperand()) ||
          isLegalMaskedScatter(SI->getValueOperand()->getType())) {
        MaskedOp.insert(SI);
        continue;
      }

      bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0);
      bool isSinglePredecessor = SI->getParent()->getSinglePredecessor();

      if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr ||
          !isSinglePredecessor)
        return false;
    }
    if (I.mayThrow())
      return false;
  }

  return true;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {

  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!LI && !SI)
        continue;

      Value *Ptr = getPointerOperand(&I);
      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be overly
      // conservative. For full groups, wrapping should be OK since if we would
      // wrap around the address space we would do a memory access at nullptr
      // even without the transformation. The wrapping checks are therefore
      // deferred until after we've formed the interleaved groups.
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
                                    /*Assume=*/true, /*ShouldCheckWrap=*/false);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());

      // An alignment of 0 means target ABI alignment.
      unsigned Align = LI ? LI->getAlignment() : SI->getAlignment();
      if (!Align)
        Align = DL.getABITypeAlignment(PtrTy->getElementType());

      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];  // (1)
//                                A[i] = b;  // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    const ValueToValueMap &Strides) {
  DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
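    // (For example, even when B is not strided and so cannot anchor a group
    // of its own, a store A above B that conflicts with B must not be sunk
    // past B as part of some other group; the dependence check below
    // enforces this.)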
    InterleaveGroup *Group = nullptr;
    if (isStrided(DesB.Stride)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //   (1, 2) is a group | A[i]   = a;  // (1)
      //                     | A[i-1] = b;  // (2) |
      //                       A[i-3] = c;  // (3)
      //                       A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {

        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup *StoreGroup = getInterleaveGroup(A);
          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory())
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // Ignore A if either A or B is in a predicated block. Although we
      // currently prevent group formation for predicated accesses, we may be
      // able to relax this limitation in the future once we handle more
      // complicated blocks.
      if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Align)) {
        DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                     << "    into the interleave group with" << *B << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor())
      releaseGroup(Group);

  // Remove interleaved groups with gaps (currently only loads) whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // For now we use Assume=false.
  // TODO: Change to Assume=true, while making sure we don't exceed the
  // threshold of runtime SCEV assumption checks (thereby potentially failing
  // to vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer
  // doesn't wrap, then we can deduce that all pointers in the group don't
  // wrap. This means that we can forcefully peel the loop in order to only
  // have to check the first pointer for no-wrap. Once we change to
  // Assume=true, we'll only need at most one runtime check per interleaved
  // group.
  for (InterleaveGroup *Group : LoadGroups) {

    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap. So we check only
    // group member 0 (which is always guaranteed to exist) and group member
    // Factor - 1; if the latter doesn't exist, we rely on peeling (if it is a
    // non-reversed access -- see Case 3).
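    // (For example, a factor-4 load group with members only at indices 0 and
    // 2 has no member at index 3, so the last-member check is skipped and we
    // instead fall back on Case 3: peeling a scalar epilogue iteration,
    // provided the group is not reversed.)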
    Value *FirstMemberPtr = getPointerOperand(Group->getMember(0));
    if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                      /*ShouldCheckWrap=*/true)) {
      DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "first group member potentially pointer-wrapping.\n");
      releaseGroup(Group);
      continue;
    }
    Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
    if (LastMember) {
      Value *LastMemberPtr = getPointerOperand(LastMember);
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true)) {
        DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                        "last group member potentially pointer-wrapping.\n");
        releaseGroup(Group);
      }
    } else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        releaseGroup(Group);
        continue;
      }
      DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }
}

LoopVectorizationCostModel::VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
  // Width 1 means no vectorization.
  VectorizationFactor Factor = {1U, 0U};
  if (OptForSize && Legal->getRuntimePointerChecking()->Need) {
    ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize")
              << "runtime pointer checks needed. Enable vectorization of this "
                 "loop with '#pragma clang loop vectorize(enable)' when "
                 "compiling with -Os/-Oz");
    DEBUG(dbgs()
          << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
    return Factor;
  }

  if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
    ORE->emit(createMissedAnalysis("ConditionalStore")
              << "store that is conditionally executed prevents vectorization");
    DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
    return Factor;
  }

  // Find the trip count.
  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');

  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);
  unsigned MaxSafeDepDist = -1U;

  // Get the maximum safe dependence distance in bits computed by LAA. If the
  // loop contains any interleaved accesses, we divide the dependence distance
  // by the maximum interleave factor of all interleaved groups. Note that
  // although the division ensures correctness, this is a fairly conservative
  // computation because the maximum distance computed by LAA may not involve
  // any of the interleaved accesses.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist =
        Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor();

  WidestRegister =
      ((WidestRegister < MaxSafeDepDist) ?
       WidestRegister : MaxSafeDepDist);
  unsigned MaxVectorSize = WidestRegister / WidestType;

  DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
               << WidestType << " bits.\n");
  DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister
               << " bits.\n");

  if (MaxVectorSize == 0) {
    DEBUG(dbgs() << "LV: The target has no vector registers.\n");
    MaxVectorSize = 1;
  }

  assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
                                " into one vector!");

  unsigned VF = MaxVectorSize;
  if (MaximizeBandwidth && !OptForSize) {
    // Collect all viable vectorization factors.
    SmallVector<unsigned, 8> VFs;
    unsigned NewMaxVectorSize = WidestRegister / SmallestType;
    for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2)
      VFs.push_back(VS);

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than existing
    // ones.
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
    for (int i = RUs.size() - 1; i >= 0; --i) {
      if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
        VF = VFs[i];
        break;
      }
    }
  }

  // If we optimize the program for size, avoid creating the tail loop.
  if (OptForSize) {
    // If we are unable to calculate the trip count, then don't try to
    // vectorize.
    if (TC < 2) {
      ORE->emit(
          createMissedAnalysis("UnknownLoopCountComplexCFG")
          << "unable to calculate the loop count due to complex control flow");
      DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
      return Factor;
    }

    // Find the maximum SIMD width that can fit within the trip count.
    VF = TC % MaxVectorSize;

    if (VF == 0)
      VF = MaxVectorSize;
    else {
      // If the trip count that we found modulo the vectorization factor is not
      // zero then we require a tail.
      ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize")
                << "cannot optimize for size and vectorize at the "
                   "same time. Enable vectorization of this loop "
                   "with '#pragma clang loop vectorize(enable)' "
                   "when compiling with -Os/-Oz");
      DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
      return Factor;
    }
  }

  int UserVF = Hints->getWidth();
  if (UserVF != 0) {
    assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
    DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");

    Factor.Width = UserVF;
    return Factor;
  }

  float Cost = expectedCost(1).first;
#ifndef NDEBUG
  const float ScalarCost = Cost;
#endif /* NDEBUG */
  unsigned Width = 1;
  DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  // Ignore scalar width, because the user explicitly wants vectorization.
  if (ForceVectorization && VF > 1) {
    Width = 2;
    Cost = expectedCost(Width).first / (float)Width;
  }

  for (unsigned i = 2; i <= VF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
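    // For example (illustrative numbers only): if the scalar loop costs 8
    // and the VF = 4 loop costs 20, the normalized vector cost is 20 / 4 = 5,
    // which beats the scalar cost of 8, so width 4 would be preferred.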
    VectorizationCostTy C = expectedCost(i);
    float VectorCost = C.first / (float)i;
    DEBUG(dbgs() << "LV: Vector loop of width " << i
                 << " costs: " << (int)VectorCost << ".\n");
    if (!C.second && !ForceVectorization) {
      DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }
    if (VectorCost < Cost) {
      Cost = VectorCost;
      Width = i;
    }
  }

  DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
        << "LV: Vectorization seems to be not beneficial, "
        << "but was forced by a user.\n");
  DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
  Factor.Width = Width;
  Factor.Cost = Width * Cost;
  return Factor;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : *BB) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // consecutive. However, we do want to take consecutive stores/loads of
      // pointer vectors into account.
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I))
        continue;

      MinWidth = std::min(MinWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
      MaxWidth = std::max(MaxWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
    }
  }

  return {MinWidth, MaxWidth};
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
                                                           unsigned VF,
                                                           unsigned LoopCost) {

  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop
  // overhead. There are many micro-architectural considerations that we
  // can't predict at this level. For example, frontend pressure (on decode
  // or fetch) due to code size, or the number and capabilities of the
  // execution ports.
  //
  // We use the following heuristics to select the interleave count:
  //  1. If the code has reductions, then we interleave to break the cross
  //     iteration dependency.
  //  2. If the loop is really small, then we interleave to reduce the loop
  //     overhead.
  //  3. We don't interleave if we think that we will spill registers to
  //     memory due to the increased register pressure.

  // When we optimize for size, we don't interleave.
  if (OptForSize)
    return 1;

  // The maximum safe dependence distance was already used to clamp the
  // vectorization factor; don't interleave loops constrained in this way.
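  // (For example, if the dependence distance limits us to processing four
  // elements at a time, running additional interleaved copies back-to-back
  // would widen the window of elements in flight, so we conservatively stay
  // at an interleave count of 1.)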
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  // Do not interleave loops with a relatively small trip count.
  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
    return 1;

  unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
  DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
               << " registers\n");

  if (VF == 1) {
    if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumScalarRegs;
  } else {
    if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumVectorRegs;
  }

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
  R.NumInstructions = std::max(R.NumInstructions, 1U);

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want a power-of-two interleave count to simplify any
  // addressing operations or alignment considerations.
  unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
                              R.MaxLocalUsers);

  // Don't count the induction variable as interleaved.
  if (EnableIndVarRegisterHeur)
    IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
                       std::max(1U, (R.MaxLocalUsers - 1)));

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);

  // Check if the user has overridden the max.
  if (VF == 1) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If we did not calculate the cost for VF (because the user selected the VF)
  // then we calculate the cost of VF here.
  if (LoopCost == 0)
    LoopCost = expectedCost(VF).first;

  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target allows.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else if (IC < 1)
    IC = 1;

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
  if (VF > 1 && Legal->getReductionVars()->size()) {
    DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF == 1 && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead and
  // potentially expose ILP opportunities.
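  // (For example, a loop body containing just one load, one add, and one
  // store is dominated by its induction-update and branch overhead; running
  // several interleaved copies per iteration amortizes that overhead over
  // more useful work.)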
  DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1 and we use the cost model
    // to estimate the cost of the loop and interleave until the cost of the
    // loop overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit this, by default, to 2,
    // so the critical path only gets increased by one reduction operation.
    if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
    return SmallIC;
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  bool HasReductions = (Legal->getReductionVars()->size() > 0);
  if (TTI.enableAggressiveInterleaving(HasReductions)) {
    DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi map that
  // holds the list of intervals that *end* at a specific location. This multi
  // map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because when we unroll, loop-invariant values
  // do not take more registers.
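  //
  // For example (an illustrative sketch), given the in-loop sequence
  //   0: %a = load ...
  //   1: %b = load ...
  //   2: %c = load ...
  //   3: %d = add %a, %b
  //   4: %e = add %c, %d
  // when the linear scan reaches %d, the intervals for %a, %b, and %c are
  // all still open, so the estimated peak usage at VF = 1 is three
  // registers.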
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;
  RU.NumInstructions = 0;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  typedef DenseMap<Instruction *, unsigned> IntervalMap;
  // Maps an index to the instruction at that index.
  DenseMap<unsigned, Instruction *> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the list of instructions that are used in the loop.
  SmallSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  unsigned Index = 0;
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    RU.NumInstructions += BB->size();
    for (Instruction &I : *BB) {
      IdxToInstr[Index++] = &I;

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = Index;
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  typedef SmallVector<Instruction *, 2> InstrList;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallSet<Instruction *, 8> OpenIntervals;

  // Get the size of the widest register.
  unsigned MaxSafeDepDist = -1U;
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
  unsigned WidestRegister =
      std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);

  DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
    if (Ty->isTokenTy())
      return 0U;
    unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
    return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
  };

  for (unsigned int i = 0; i < Index; ++i) {
    Instruction *I = IdxToInstr[i];

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // For each VF find the maximum usage of registers.
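    // For example (illustrative numbers only): by the lambda above, an i32
    // value at VF = 8 on a target with 128-bit vector registers occupies
    // max(1, 8 * 32 / 128) = 2 registers.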
    for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
      if (VFs[j] == 1) {
        MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
        continue;
      }

      // Count the number of live intervals.
      unsigned RegUsage = 0;
      for (auto Inst : OpenIntervals) {
        // Skip ignored values for VF > 1.
        if (VecValuesToIgnore.count(Inst))
          continue;
        RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
      }
      MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
    }

    DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
                 << OpenIntervals.size() << '\n');

    // Add the current instruction to the list of open intervals.
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    unsigned Invariant = 0;
    if (VFs[i] == 1)
      Invariant = LoopInvariants.size();
    else {
      for (auto Inst : LoopInvariants)
        Invariant += GetRegUsage(Inst->getType(), VFs[i]);
    }

    DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
    DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
    DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
    DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n');

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::expectedCost(unsigned VF) {
  VectorizationCostTy Cost;

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    VectorizationCostTy BlockCost;

    // For each instruction in the old loop.
    for (Instruction &I : *BB) {
      // Skip dbg intrinsics.
      if (isa<DbgInfoIntrinsic>(I))
        continue;

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      VectorizationCostTy C = getInstructionCost(&I, VF);

      // Check if we should override the cost.
      if (ForceTargetInstructionCost.getNumOccurrences() > 0)
        C.first = ForceTargetInstructionCost;

      BlockCost.first += C.first;
      BlockCost.second |= C.second;
      DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF "
                   << VF << " For instruction: " << I << '\n');
    }

    // If we are vectorizing a predicated block, it will have been
    // if-converted. This means that the block's instructions (aside from
    // stores and instructions that may divide by zero) will now be
    // unconditionally executed. For the scalar case, we may not always execute
    // the predicated block. Thus, scale the block's cost by the probability of
    // executing it.
    if (VF == 1 && Legal->blockNeedsPredication(BB))
      BlockCost.first /= getReciprocalPredBlockProb();

    Cost.first += BlockCost.first;
    Cost.second |= BlockCost.second;
  }

  return Cost;
}

/// \brief Check whether the address computation for a non-consecutive memory
/// access looks like an unlikely candidate for being merged into the indexing
/// mode.
///
/// We look for a GEP which has one index that is an induction variable and all
/// other indices are loop invariant. If the stride of this access is also
/// within a small bound we decide that this address computation can likely be
/// merged into the addressing mode.
/// In all other cases, we identify the address computation as complex.
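///
/// For example (an illustrative sketch), a GEP computing &A[i][j] inside a
/// loop over i, with j loop-invariant, has one induction index and one
/// invariant index; if its stride is a small constant, the computation can
/// likely be folded into the addressing mode. By contrast, a GEP indexed by
/// a value reloaded from memory on every iteration is classified as complex.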
static bool isLikelyComplexAddressComputation(Value *Ptr,
                                              LoopVectorizationLegality *Legal,
                                              ScalarEvolution *SE,
                                              const Loop *TheLoop) {
  auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  if (!Gep)
    return true;

  // We are looking for a GEP with all loop-invariant indices except for one
  // which should be an induction variable.
  unsigned NumOperands = Gep->getNumOperands();
  for (unsigned i = 1; i < NumOperands; ++i) {
    Value *Opd = Gep->getOperand(i);
    if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
        !Legal->isInductionVariable(Opd))
      return true;
  }

  // Now we know we have a GEP ptr, %inv, %ind, %inv. Make sure that the step
  // can likely be merged into the address computation.
  unsigned MaxMergeDistance = 64;

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Ptr));
  if (!AddRec)
    return true;

  // Check that the step is constant.
  const SCEV *Step = AddRec->getStepRecurrence(*SE);
  // Calculate the pointer stride and check if it is consecutive.
  const auto *C = dyn_cast<SCEVConstant>(Step);
  if (!C)
    return true;

  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return true;

  int64_t StepVal = APStepVal.getSExtValue();

  return StepVal > MaxMergeDistance;
}

static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  return Legal->hasStride(I->getOperand(0)) ||
         Legal->hasStride(I->getOperand(1));
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
  // If we know that this instruction will remain uniform, check the cost of
  // the scalar version.
  if (Legal->isUniformAfterVectorization(I))
    VF = 1;

  Type *VectorTy;
  unsigned C = getInstructionCost(I, VF, VectorTy);

  bool TypeNotScalarized =
      VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF;
  return VectorizationCostTy(C, TypeNotScalarized);
}

unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
                                                        unsigned VF,
                                                        Type *&VectorTy) {
  Type *RetTy = I->getType();
  if (VF > 1 && MinBWs.count(I))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  VectorTy = ToVectorTy(RetTy, VF);
  auto SE = PSE.getSE();

  // TODO: We need to estimate the cost of intrinsic calls.
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    // We mark this instruction as zero-cost because the cost of GEPs in
    // vectorized code depends on whether the corresponding memory instruction
    // is scalarized or not. Therefore, we handle GEPs with the memory
    // instruction cost.
    return 0;
  case Instruction::Br: {
    return TTI.getCFInstrCost(I->getOpcode());
  }
  case Instruction::PHI: {
    auto *Phi = cast<PHINode>(I);

    // First-order recurrences are replaced by vector shuffles inside the loop.
    if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
      return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
                                VectorTy, VF - 1, VectorTy);

    // TODO: IF-converted IFs become selects.
    return 0;
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    // If we have a predicated instruction, it may not be executed for each
    // vector lane. Get the scalarization cost and scale this amount by the
    // probability of executing the predicated block. If the instruction is not
    // predicated, we fall through to the next case.
    if (VF > 1 && Legal->isScalarWithPredication(I)) {
      unsigned Cost = 0;

      // These instructions have a non-void type, so account for the phi nodes
      // that we will create. This cost is likely to be zero. The phi node
      // cost, if any, should be scaled by the block probability because it
      // models a copy at the end of each predicated block.
      Cost += VF * TTI.getCFInstrCost(Instruction::PHI);

      // The cost of the non-predicated instruction.
      Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);

      // The cost of insertelement and extractelement instructions needed for
      // scalarization.
      Cost += getScalarizationOverhead(I, VF, TTI);

      // Scale the cost by the probability of executing the predicated blocks.
      // This assumes the predicated block for each vector lane is equally
      // likely.
      return Cost / getReciprocalPredBlockProb();
    }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Since we will replace the stride by 1 the multiplication should go away.
    if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
      return 0;
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueProperties Op1VP =
        TargetTransformInfo::OP_None;
    TargetTransformInfo::OperandValueProperties Op2VP =
        TargetTransformInfo::OP_None;
    Value *Op2 = I->getOperand(1);

    // Check for a splat or for a non-uniform vector of constants.
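    // For example (an illustrative sketch), in
    //   shl <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
    // the second operand is a constant splat of a power of two, so it is
    // classified below as OK_UniformConstantValue with OP_PowerOf2, which
    // targets such as x86 can often lower more cheaply than a shift by a
    // variable amount.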
    if (auto *CInt = dyn_cast<ConstantInt>(Op2)) {
      if (CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;
      Op2VK = TargetTransformInfo::OK_UniformConstantValue;
    } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
      Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      Constant *SplatValue = cast<Constant>(Op2)->getSplatValue();
      if (SplatValue) {
        ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue);
        if (CInt && CInt->getValue().isPowerOf2())
          Op2VP = TargetTransformInfo::OP_PowerOf2;
        Op2VK = TargetTransformInfo::OK_UniformConstantValue;
      }
    } else if (Legal->isUniform(Op2)) {
      Op2VK = TargetTransformInfo::OK_UniformValue;
    }

    return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK,
                                      Op1VP, Op2VP);
  }
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
    bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
    Type *CondTy = SI->getCondition()->getType();
    if (!ScalarCond)
      CondTy = VectorType::get(CondTy, VF);

    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ValTy = I->getOperand(0)->getType();
    Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
    auto It = MinBWs.find(Op0AsInstruction);
    if (VF > 1 && It != MinBWs.end())
      ValTy = IntegerType::get(ValTy->getContext(), It->second);
    VectorTy = ToVectorTy(ValTy, VF);
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy);
  }
  case Instruction::Store:
  case Instruction::Load: {
    StoreInst *SI = dyn_cast<StoreInst>(I);
    LoadInst *LI = dyn_cast<LoadInst>(I);
    Type *ValTy = (SI ? SI->getValueOperand()->getType() : LI->getType());
    VectorTy = ToVectorTy(ValTy, VF);

    unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment();
    unsigned AS =
        SI ? SI->getPointerAddressSpace() : LI->getPointerAddressSpace();
    Value *Ptr = getPointerOperand(I);
    // We add the cost of address computation here instead of with the GEP
    // instruction because only here we know whether the operation is
    // scalarized.
    if (VF == 1)
      return TTI.getAddressComputationCost(VectorTy) +
             TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);

    if (LI && Legal->isUniform(Ptr)) {
      // Scalar load + broadcast.
      unsigned Cost = TTI.getAddressComputationCost(ValTy->getScalarType());
      Cost += TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                                  Alignment, AS);
      return Cost +
             TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, ValTy);
    }

    // For an interleaved access, calculate the total cost of the whole
    // interleave group.
    if (Legal->isAccessInterleaved(I)) {
      auto Group = Legal->getInterleavedAccessGroup(I);
      assert(Group && "Fail to get an interleaved access group.");

      // Only calculate the cost once at the insert position.
      if (Group->getInsertPos() != I)
        return 0;

      unsigned InterleaveFactor = Group->getFactor();
      Type *WideVecTy =
          VectorType::get(VectorTy->getVectorElementType(),
                          VectorTy->getVectorNumElements() * InterleaveFactor);

      // Holds the indices of existing members in an interleaved load group.
      // An interleaved store group doesn't need this as it doesn't allow gaps.
      SmallVector<unsigned, 4> Indices;
      if (LI) {
        for (unsigned i = 0; i < InterleaveFactor; i++)
          if (Group->getMember(i))
            Indices.push_back(i);
      }

      // Calculate the cost of the whole interleaved group.
      unsigned Cost = TTI.getInterleavedMemoryOpCost(
          I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
          Group->getAlignment(), AS);

      if (Group->isReverse())
        Cost +=
            Group->getNumMembers() *
            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);

      // FIXME: An interleaved load group with a huge gap could be even more
      // expensive than scalar operations. Then we could ignore such a group
      // and use scalar operations instead.
      return Cost;
    }

    // Check if the memory instruction will be scalarized.
    if (Legal->memoryInstructionMustBeScalarized(I, VF)) {
      unsigned Cost = 0;
      Type *PtrTy = ToVectorTy(Ptr->getType(), VF);

      // True if the memory instruction's address computation is complex.
      bool IsComplexComputation =
          isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop);

      // Get the cost of the scalar memory instruction and address computation.
      Cost += VF * TTI.getAddressComputationCost(PtrTy, IsComplexComputation);
      Cost += VF *
              TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                                  Alignment, AS);

      // Get the overhead of the extractelement and insertelement instructions
      // we might create due to scalarization.
      Cost += getScalarizationOverhead(I, VF, TTI);

      // If we have a predicated store, it may not be executed for each vector
      // lane. Scale the cost by the probability of executing the predicated
      // block.
      if (Legal->isScalarWithPredication(I))
        Cost /= getReciprocalPredBlockProb();

      return Cost;
    }

    // Determine if the pointer operand of the access is either consecutive or
    // reverse consecutive.
    int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
    bool Reverse = ConsecutiveStride < 0;

    // Determine if either a gather or scatter operation is legal.
    bool UseGatherOrScatter =
        !ConsecutiveStride && Legal->isLegalGatherOrScatter(I);

    unsigned Cost = TTI.getAddressComputationCost(VectorTy);
    if (UseGatherOrScatter) {
      assert(ConsecutiveStride == 0 &&
             "Gather/Scatter are not used for consecutive stride");
      return Cost +
             TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
                                        Legal->isMaskRequired(I), Alignment);
    }
    // Wide load/stores.
    if (Legal->isMaskRequired(I))
      Cost +=
          TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
    else
      Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);

    if (Reverse)
      Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
    return Cost;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    // We optimize the truncation of induction variables.
    // The cost of these is the same as the scalar operation.
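    // (An illustrative case: "trunc i64 %iv to i32" of a primary induction
    // can instead be generated as a narrower vector induction, so no extra
    // per-lane truncation work is required.)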
    if (I->getOpcode() == Instruction::Trunc &&
        Legal->isInductionVariable(I->getOperand(0)))
      return TTI.getCastInstrCost(I->getOpcode(), I->getType(),
                                  I->getOperand(0)->getType());

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF);
    if (VF > 1 && MinBWs.count(I)) {
      // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (I->getOpcode() == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      } else if (I->getOpcode() == Instruction::ZExt ||
                 I->getOpcode() == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      }
    }

    return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy);
  }
  case Instruction::Call: {
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
    if (getVectorIntrinsicIDForCall(CI, TLI))
      return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
    return CallCost;
  }
  default:
    // The cost of executing VF copies of the scalar instruction. This opcode
    // is unknown. Assume that it is the same as 'mul'.
    return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
           getScalarizationOverhead(I, VF, TTI);
  } // end of switch.
}

char LoopVectorize::ID = 0;
static const char lv_name[] = "Loop Vectorization";
INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)

namespace llvm {
Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
  return new LoopVectorize(NoUnrolling, AlwaysVectorize);
}
}

bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
  // Check if the pointer operand of a load or store instruction is
  // consecutive.
  if (auto *Ptr = getPointerOperand(Inst))
    return Legal->isConsecutivePtr(Ptr);
  return false;
}

void LoopVectorizationCostModel::collectValuesToIgnore() {
  // Ignore ephemeral values.
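  // (Ephemeral values are values used only by @llvm.assume intrinsics and
  // the conditions feeding them; they produce no machine code, so they
  // should not influence the cost model.)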
  CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);

  // Ignore type-promoting instructions we identified during reduction
  // detection.
  for (auto &Reduction : *Legal->getReductionVars()) {
    RecurrenceDescriptor &RedDes = Reduction.second;
    SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }

  // Insert values known to be scalar into VecValuesToIgnore.
  for (auto *BB : TheLoop->getBlocks())
    for (auto &I : *BB)
      if (Legal->isScalarAfterVectorization(&I))
        VecValuesToIgnore.insert(&I);
}

void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
                                             bool IfPredicateInstr) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  // Holds vector parameters or scalars, in case of uniform vals.
  SmallVector<VectorParts, 4> Params;

  setDebugLocFromInst(Builder, Instr);

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  // Initialize a new scalar map entry.
  ScalarParts Entry(UF);

  VectorParts Cond;
  if (IfPredicateInstr)
    Cond = createBlockInMask(Instr->getParent());

  // For each vector unroll 'part':
  for (unsigned Part = 0; Part < UF; ++Part) {
    Entry[Part].resize(1);
    // For each scalar that we create:

    // Start an "if (pred) a[i] = ..." block.
    Value *Cmp = nullptr;
    if (IfPredicateInstr) {
      if (Cond[Part]->getType()->isVectorTy())
        Cond[Part] =
            Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0));
      Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part],
                               ConstantInt::get(Cond[Part]->getType(), 1));
    }

    Instruction *Cloned = Instr->clone();
    if (!IsVoidRetTy)
      Cloned->setName(Instr->getName() + ".cloned");

    // Replace the operands of the cloned instruction with their scalar
    // equivalents in the new loop.
    for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
      auto *NewOp = getScalarValue(Instr->getOperand(op), Part, 0);
      Cloned->setOperand(op, NewOp);
    }

    // Place the cloned scalar in the new loop.
    Builder.Insert(Cloned);

    // Add the cloned scalar to the scalar map entry.
    Entry[Part][0] = Cloned;

    // If we just cloned a new assumption, add it to the assumption cache.
    if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
      if (II->getIntrinsicID() == Intrinsic::assume)
        AC->registerAssumption(II);

    // End if-block.
    if (IfPredicateInstr)
      PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp));
  }
  VectorLoopValueMap.initScalar(Instr, Entry);
}

void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
  auto *SI = dyn_cast<StoreInst>(Instr);
  bool IfPredicateInstr = (SI && Legal->blockNeedsPredication(SI->getParent()));

  return scalarizeInstruction(Instr, IfPredicateInstr);
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
                                        Instruction::BinaryOps BinOp) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
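  // For example (an illustrative sketch), a caller handling the second
  // unrolled copy of an integer induction would pass StartIdx = 1, and the
  // code below then returns Val + 1 * Step, the next scalar induction value.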
  Type *Ty = Val->getType();
  assert(!Ty->isVectorTy() && "Val must be a scalar");

  if (Ty->isFloatingPointTy()) {
    Constant *C = ConstantFP::get(Ty, (double)StartIdx);

    // Floating point operations had to be 'fast' to enable the unrolling.
    Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
    return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
  }
  Constant *C = ConstantInt::get(Ty, StartIdx);
  return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
}

static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  SmallVector<Metadata *, 4> MDs;
  // Reserve first location for self reference to the LoopID metadata node.
  MDs.push_back(nullptr);
  bool IsUnrollMetadata = false;
  MDNode *LoopID = L->getLoopID();
  if (LoopID) {
    // First find existing loop unrolling disable metadata.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
      if (MD) {
        const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata =
            S && S->getString().startswith("llvm.loop.unroll.disable");
      }
      MDs.push_back(LoopID->getOperand(i));
    }
  }

  if (!IsUnrollMetadata) {
    // Add runtime unroll disable metadata.
    LLVMContext &Context = L->getHeader()->getContext();
    SmallVector<Metadata *, 1> DisableOperands;
    DisableOperands.push_back(
        MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
    MDNode *DisableNode = MDNode::get(Context, DisableOperands);
    MDs.push_back(DisableNode);
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);
    L->setLoopID(NewLoopID);
  }
}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert(L->empty() && "Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  DEBUG(dbgs() << "\nLV: Checking a loop in \""
               << L->getHeader()->getParent()->getName() << "\" from "
               << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, DisableUnrolling, *ORE);

  DEBUG(dbgs() << "LV: Loop hints:"
               << " force="
               << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                       ? "disabled"
                       : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                              ? "enabled"
                              : "?"))
               << " width=" << Hints.getWidth()
               << " unroll=" << Hints.getInterleave() << "\n");

  // The function containing the loop.
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are used for
  // the less verbose reporting of vectorized loops and of unvectorized loops
  // that may benefit from vectorization, respectively.

  if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
    DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  // Check the loop for a trip count threshold:
  // do not vectorize loops with a tiny trip count.
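  // (With only a handful of iterations, the vector loop plus its scalar
  // epilogue may execute barely once, so the extra checks and code size
  // rarely pay for themselves unless vectorization is explicitly forced.)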
  const unsigned TC = SE->getSmallConstantTripCount(L);
  if (TC > 0u && TC < TinyTripCountVectorThreshold) {
    DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                 << "This loop is not worth vectorizing.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      DEBUG(dbgs() << "\n");
      ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                     "NotBeneficial", L)
                << "vectorization is not beneficial "
                   "and is not explicitly forced");
      return false;
    }
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements(*ORE);
  LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE,
                                &Requirements, &Hints);
  if (!LVL.canVectorize()) {
    DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints);
  CM.collectValuesToIgnore();

  // Check the function attributes to find out if this function should be
  // optimized for size.
  bool OptForSize =
      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();

  // Compute the weighted frequency of this loop being executed and see if it
  // is less than 20% of the function entry baseline frequency. Note that we
  // always have a canonical loop here because we think we *can* vectorize.
  // FIXME: This is hidden behind a flag due to pervasive uncertainty about
  // exactly what block frequency models.
  if (LoopVectorizeWithBlockFrequency) {
    BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader());
    if (Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
        LoopEntryFreq < ColdEntryFreq)
      OptForSize = true;
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem right -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
                    " attribute is used.\n");
    ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                   "NoImplicitFloat", L)
              << "loop not vectorized due to NoImplicitFloat attribute");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
    ORE->emit(
        createMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
        << "loop not vectorized due to unsafe FP support.");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Select the optimal vectorization factor.
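  // Note: the vectorization and interleaving decisions below are largely
  // independent. The cost model may return VF.Width == 1 (vectorization not
  // profitable) while still recommending an interleave count IC > 1, in
  // which case the loop is only interleaved; the reverse combination
  // (vectorize without interleaving) is also possible.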
  const LoopVectorizationCostModel::VectorizationFactor VF =
      CM.selectVectorizationFactor(OptForSize);

  // Select the interleave count.
  unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

  // Get the user's interleave count.
  unsigned UserIC = Hints.getInterleave();

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                    "requirements.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  if (VF.Width == 1) {
    DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    DEBUG(dbgs()
          << "LV: Interleaving is beneficial but is explicitly disabled.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if the user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
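  // The four branches below cover the VectorizeLoop x InterleaveLoop
  // decision matrix: when both are off we bail out after emitting remarks;
  // when exactly one is off we emit the analysis remark for the rejected
  // transformation and proceed with the other; when both are on we do both.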
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit(OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << VecDiagMsg.second);
    ORE->emit(OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << IntDiagMsg.second);
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit(OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << VecDiagMsg.second);
  } else if (VectorizeLoop && !InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    ORE->emit(OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << IntDiagMsg.second);
  } else if (VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  using namespace ore;
  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not profitable to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    Unroller.vectorize();

    ORE->emit(OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                 L->getHeader())
              << "interleaved loop (interleaved count: "
              << NV("InterleaveCount", IC) << ")");
  } else {
    // If we decided that it is both legal and profitable to vectorize the
    // loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LB.vectorize();
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling of the scalar loop when
    // there are no runtime checks about strides and memory. A scalar loop
    // that is rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      AddRuntimeUnrollDisableMetaData(L);

    // Report the vectorization decision.
    ORE->emit(OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                 L->getHeader())
              << "vectorized loop (vectorization width: "
              << NV("VectorizationFactor", VF.Width)
              << ", interleaved count: " << NV("InterleaveCount", IC) << ")");
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();

  DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_) {

  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;

  // Compute some weights outside of the loop over the loops. Compute this
  // using a BranchProbability to re-use its scaling math.
  const BranchProbability ColdProb(1, 5); // 20%
  ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
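  //
  // (getNumberOfRegisters(true) queries the number of *vector* registers,
  // and getMaxInterleaveFactor(1) < 2 means the target cannot usefully
  // interleave scalar operations.)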
  if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
    return false;

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    addAcyclicInnerLoop(*L, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  bool Changed = false;
  while (!Worklist.empty())
    Changed |= processLoop(Worklist.pop_back_val());

  return Changed;
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    return LAM.getResult<LoopAccessAnalysis>(L);
  };
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, TLI, DB, AA, AC, GetLAA, ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
}
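// Usage note (illustrative, assuming the standard 'opt' driver): with the
// legacy pass manager this pass is requested as -loop-vectorize; with the
// new pass manager, LoopVectorizePass::run above is reached via the
// registered pass name (LV_NAME, "loop-vectorize"), e.g.:
//
//   opt -passes=loop-vectorize -S input.ll -o output.ll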