//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//
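// As an illustration of the widening described above, a scalar loop such as
// (C-like pseudocode, VF = 4):
//
//   for (i = 0; i < n; i++)
//     a[i] = b[i] + 1;
//
// is conceptually rewritten so that each iteration of the new loop computes
// four elements at once and the induction variable advances by four:
//
//   for (i = 0; i + 3 < n; i += 4)
//     a[i:i+3] = b[i:i+3] + 1;   // one wide SIMD operation
//
// with any remaining iterations handled by a scalar epilogue loop.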
#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <map>
#include <tuple>

using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// We don't vectorize loops with a known constant trip count below this number.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Don't vectorize loops with a constant "
             "trip count that is smaller than this "
             "value."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
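// These flags are primarily intended for testing. For example, one might
// exercise interleaved-access vectorization on a single module with an
// invocation along the lines of (illustrative; the input name is a
// placeholder):
//
//   opt -loop-vectorize -enable-interleaved-mem-accesses -S < input.ll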
/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;
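// Note that the interleave count (IC) multiplies the vectorization factor
// (VF): e.g., with VF = 4 and IC = 2, each iteration of the vector loop
// consumes 8 iterations of the original scalar loop.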
static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(false), cl::Hidden,
    cl::desc("Enable if-predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed, it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createMissedAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                     Instruction *I = nullptr) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If the instruction has a debug location attached, prefer it; otherwise
    // fall back to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}
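// Typical usage is to stream the failure reason into the returned remark and
// emit it, e.g. (illustrative; the remark name is an example):
//
//   ORE->emit(createMissedAnalysis(LV_NAME, "CantComputeNumberOfIterations",
//                                  TheLoop)
//             << "could not determine number of loop iterations");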
namespace {

// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

/// Returns true if the given loop body has a cycle, excluding the loop
/// itself.
static bool hasCyclesInLoopBody(const Loop &L) {
  if (!L.empty())
    return true;

  for (const auto &SCC :
       make_range(scc_iterator<Loop, LoopBodyTraits>::begin(L),
                  scc_iterator<Loop, LoopBodyTraits>::end(L))) {
    if (SCC.size() > 1) {
      DEBUG(dbgs() << "LVL: Detected a cycle in the loop body:\n");
      DEBUG(L.dump());
      return true;
    }
  }
  return false;
}

/// \brief This modifies LoopAccessReport to initialize message with
/// loop-vectorizer-specific part.
class VectorizationReport : public LoopAccessReport {
public:
  VectorizationReport(Instruction *I = nullptr)
      : LoopAccessReport("loop not vectorized: ", I) {}

  /// \brief This allows promotion of the loop-access analysis report into the
  /// loop-vectorizer report. It modifies the message to add the
  /// loop-vectorizer-specific part of the message.
  explicit VectorizationReport(const LoopAccessReport &R)
      : LoopAccessReport(Twine("loop not vectorized: ") + R.str(),
                         R.getInstr()) {}
};

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}

/// A helper function that returns a GEP instruction and knows to skip a
/// 'bitcast'. The 'bitcast' may be skipped if the source and the destination
/// pointee types of the 'bitcast' have the same size.
/// For example:
///   bitcast double** %var to i64* - can be skipped
///   bitcast double** %var to i8*  - cannot
static GetElementPtrInst *getGEPInstruction(Value *Ptr) {

  if (isa<GetElementPtrInst>(Ptr))
    return cast<GetElementPtrInst>(Ptr);

  if (isa<BitCastInst>(Ptr) &&
      isa<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0))) {
    Type *BitcastTy = Ptr->getType();
    Type *GEPTy = cast<BitCastInst>(Ptr)->getSrcTy();
    if (!isa<PointerType>(BitcastTy) || !isa<PointerType>(GEPTy))
      return nullptr;
    Type *Pointee1Ty = cast<PointerType>(BitcastTy)->getPointerElementType();
    Type *Pointee2Ty = cast<PointerType>(GEPTy)->getPointerElementType();
    const DataLayout &DL = cast<BitCastInst>(Ptr)->getModule()->getDataLayout();
    if (DL.getTypeSizeInBits(Pointee1Ty) == DL.getTypeSizeInBits(Pointee2Ty))
      return cast<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0));
  }
  return nullptr;
}

/// A helper function that returns the pointer operand of a load or store
/// instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {

  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
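// For example, under typical data layouts i32 is regular (32-bit type size,
// 32-bit alloc size), whereas i1 is irregular (1-bit type size but a 1-byte
// alloc size), as is x86_fp80 (80-bit type size padded out to a larger alloc
// size), so arrays of such types cannot be treated as vectors directly.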
/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the LoopVectorizationLegality
/// class to provide information about the induction and reduction variables
/// found for a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Induction(nullptr),
        OldInduction(nullptr), VectorLoopValueMap(UnrollFactor, VecWidth),
        TripCount(nullptr), VectorTripCount(nullptr), Legal(LVL), Cost(CM),
        AddedSafetyChecks(false) {}

  // Perform the actual loop widening (vectorization).
  void vectorize() {
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop();
    // Widen each instruction in the old loop to a new one in the new loop.
    vectorizeLoop();
  }

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  typedef SmallVector<Value *, 2> VectorParts;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  typedef SmallVector<SmallVector<Value *, 4>, 2> ScalarParts;
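  // For example, with UF = 2 and VF = 4, a vectorized i32 value is held as
  // two <4 x i32> parts, while a scalarized one is held as two groups of
  // four i32 scalars (one group per unrolled part).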
  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCache;

  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop();

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);
  /// Copy and widen the instructions from the old loop.
  virtual void vectorizeLoop();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes where the
  /// incoming value is 'Undef'. While vectorizing we only handled real values
  /// that were defined inside the loop. Here we fix the 'undef case'.
  /// See PR14725.
  void fixLCSSAPHIs();

  /// Predicate conditional instructions that require predication on their
  /// respective conditions.
  void predicateInstructions();

  /// Collect the instructions from the original loop that would be trivially
  /// dead in the vectorized loop if generated.
  void collectTriviallyDeadInstructions();

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);
  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// A helper function to vectorize a single BB within the innermost loop.
  void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF,
                           PhiVector *PV);

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateInstr is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
  virtual void scalarizeInstruction(Instruction *Instr,
                                    bool IfPredicateInstr = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable, then we extend it to N, N+1,
  /// ...; this is needed because each iteration in the loop corresponds to a
  /// SIMD element.
  virtual Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at \p StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);
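  // For example, given Val = <i, i, i, i> (a broadcast induction start),
  // StartIdx = 0, and Step = 1, this produces <i, i+1, i+2, i+3>; the part
  // for the next unrolled iteration would use StartIdx = VF and yield
  // <i+4, i+5, i+6, i+7>.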
  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
  /// can be a truncate instruction).
  void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal);

  /// Create a vector induction phi node based on an existing scalar one. This
  /// currently only works for integer induction variables with a constant
  /// step. \p EntryVal is the value from the original loop that maps to the
  /// vector phi node. If \p EntryVal is a truncate instruction, instead of
  /// widening the original IV, we widen a version of the IV truncated to \p
  /// EntryVal's type.
  void createVectorIntInductionPHI(const InductionDescriptor &II,
                                   Instruction *EntryVal);

  /// Widen an integer induction variable \p IV. If \p Trunc is provided, the
  /// induction variable will first be truncated to the corresponding type.
  void widenIntInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// Return a constant reference to the VectorParts corresponding to \p V from
  /// the original loop. If the value has already been vectorized, the
  /// corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// new vector values on-demand by inserting the scalar values into vectors
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into vectors.
  const VectorParts &getVectorValue(Value *V);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part and vector index \p Lane. If the value has
  /// been vectorized but not scalarized, the necessary extractelement
  /// instruction will be generated.
  Value *getScalarValue(Value *V, unsigned Part, unsigned Lane);
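  // For illustration, with VF = 4, building a vector from scalar parts emits
  // an insertelement chain such as:
  //
  //   %v0 = insertelement <4 x i32> undef, i32 %s0, i32 0
  //   %v1 = insertelement <4 x i32> %v0,   i32 %s1, i32 1
  //   %v2 = insertelement <4 x i32> %v1,   i32 %s2, i32 2
  //   %v3 = insertelement <4 x i32> %v2,   i32 %s3, i32 3
  //
  // while the reverse direction uses extractelement to pull a single lane out
  // of an already-vectorized value.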
  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the trip count would overflow, or we
  /// wouldn't have enough iterations to execute one vector loop.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if the vector trip count is nonzero.
  void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// This is a helper class for maintaining vectorization state. It's used for
  /// mapping values from the original loop to their corresponding values in
  /// the new loop. Two mappings are maintained: one for vectorized values and
  /// one for scalarized values. Vectorized values are represented with UF
  /// vector values in the new loop, and scalarized values are represented with
  /// UF x VF scalar values in the new loop. UF and VF are the unroll and
  /// vectorization factors, respectively.
  ///
  /// Entries can be added to either map with initVector and initScalar, which
  /// initialize and return a constant reference to the new entry. If a
  /// non-constant reference to a vector entry is required, getVector can be
  /// used to retrieve a mutable entry. We currently directly modify the mapped
  /// values during "fix-up" operations that occur once the first phase of
  /// widening is complete. These operations include type truncation and the
  /// second phase of recurrence widening.
  ///
  /// Otherwise, entries from either map should be accessed using the
  /// getVectorValue or getScalarValue functions from InnerLoopVectorizer.
  /// getVectorValue and getScalarValue coordinate to generate a vector or
  /// scalar value on-demand if one is not yet available. When vectorizing a
  /// loop, we visit the definition of an instruction before its uses. When
  /// visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  struct ValueMap {

    /// Construct an empty map with the given unroll and vectorization factors.
    ValueMap(unsigned UnrollFactor, unsigned VecWidth)
        : UF(UnrollFactor), VF(VecWidth) {
      // The unroll and vectorization factors are only used in asserts builds
      // to verify map entries are sized appropriately.
      (void)UF;
      (void)VF;
    }

    /// \return True if the map has a vector entry for \p Key.
    bool hasVector(Value *Key) const { return VectorMapStorage.count(Key); }

    /// \return True if the map has a scalar entry for \p Key.
    bool hasScalar(Value *Key) const { return ScalarMapStorage.count(Key); }
    /// \brief Map \p Key to the given VectorParts \p Entry, and return a
    /// constant reference to the new vector map entry. The given key should
    /// not already be in the map, and the given VectorParts should be
    /// correctly sized for the current unroll factor.
    const VectorParts &initVector(Value *Key, const VectorParts &Entry) {
      assert(!hasVector(Key) && "Vector entry already initialized");
      assert(Entry.size() == UF && "VectorParts has wrong dimensions");
      VectorMapStorage[Key] = Entry;
      return VectorMapStorage[Key];
    }

    /// \brief Map \p Key to the given ScalarParts \p Entry, and return a
    /// constant reference to the new scalar map entry. The given key should
    /// not already be in the map, and the given ScalarParts should be
    /// correctly sized for the current unroll and vectorization factors.
    const ScalarParts &initScalar(Value *Key, const ScalarParts &Entry) {
      assert(!hasScalar(Key) && "Scalar entry already initialized");
      assert(Entry.size() == UF &&
             all_of(make_range(Entry.begin(), Entry.end()),
                    [&](const SmallVectorImpl<Value *> &Values) -> bool {
                      return Values.size() == VF;
                    }) &&
             "ScalarParts has wrong dimensions");
      ScalarMapStorage[Key] = Entry;
      return ScalarMapStorage[Key];
    }
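    // A minimal usage sketch (illustrative; getWidenedValueFor is a
    // hypothetical helper, not part of this file):
    //
    //   VectorParts Parts(UF);
    //   for (unsigned Part = 0; Part < UF; ++Part)
    //     Parts[Part] = getWidenedValueFor(V, Part); // hypothetical helper
    //   VectorLoopValueMap.initVector(V, Parts);
    //
    // After this, getVectorValue(V) returns the cached parts instead of
    // regenerating them.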
    /// \return A reference to the vector map entry corresponding to \p Key.
    /// The key should already be in the map. This function should only be used
    /// when it's necessary to update values that have already been vectorized.
    /// This is the case for "fix-up" operations including type truncation and
    /// the second phase of recurrence vectorization. If a non-const reference
    /// isn't required, getVectorValue should be used instead.
    VectorParts &getVector(Value *Key) {
      assert(hasVector(Key) && "Vector entry not initialized");
      return VectorMapStorage.find(Key)->second;
    }

    /// Retrieve an entry from the vector or scalar maps. The preferred way to
    /// access an existing mapped entry is with getVectorValue or
    /// getScalarValue from InnerLoopVectorizer. Until those functions can be
    /// moved inside ValueMap, we have to declare them as friends.
    friend const VectorParts &InnerLoopVectorizer::getVectorValue(Value *V);
    friend Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part,
                                                      unsigned Lane);

  private:
    /// The unroll factor. Each entry in the vector map contains UF vector
    /// values.
    unsigned UF;

    /// The vectorization factor. Each entry in the scalar map contains UF x VF
    /// scalar values.
    unsigned VF;

    /// The vector and scalar map storage. We use std::map and not DenseMap
    /// because insertions to DenseMap invalidate its iterators.
    std::map<Value *, VectorParts> VectorMapStorage;
    std::map<Value *, ScalarParts> ScalarMapStorage;
  };

  /// The original loop.
  Loop *OrigLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;
  /// Loop Info.
  LoopInfo *LI;
  /// Dominator Tree.
  DominatorTree *DT;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Assumption Cache.
  AssumptionCache *AC;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

protected:
  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;
  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;
  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;
  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;
  /// The vector loop body.
  BasicBlock *LoopVectorBody;
  /// The scalar loop body.
  BasicBlock *LoopScalarBody;
  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;
  /// The induction variable of the old basic block.
  PHINode *OldInduction;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  ValueMap VectorLoopValueMap;

  /// Store instructions that should be predicated, as a pair
  /// <StoreInst, Predicate>.
  SmallVector<std::pair<Instruction *, Value *>, 4> PredicatedInstructions;
  EdgeMaskCache MaskCache;
  /// Trip count of the original loop.
  Value *TripCount;
  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount;
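  // For example, with TripCount = 1003, VF = 4, and UF = 2, the vector loop
  // executes 1000 / 8 = 125 iterations (VectorTripCount = 1000) and the
  // remaining 3 iterations run in the scalar epilogue loop.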
  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks;

  // Holds instructions from the original loop whose counterparts in the
  // vectorized loop would be trivially dead if generated. For example,
  // original induction update instructions can become dead because we
  // separately emit induction "steps" when generating code for the new loop.
  // Similarly, we create a new latch condition when setting up the structure
  // of the new loop, so the old one can become dead.
  SmallPtrSet<Instruction *, 4> DeadInstructions;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  void scalarizeInstruction(Instruction *Instr,
                            bool IfPredicateInstr = false) override;
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

/// \brief Set the debug location in the builder using the debug location in
/// the instruction.
static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr))
    B.SetCurrentDebugLocation(Inst->getDebugLoc());
  else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}
/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is related to the leader and it could be
  /// negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The group's index range must span fewer than Factor positions.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }
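  // For example, if the group was created with leader A[i+1] (key 0) and we
  // then insert A[i] with Index = -1, the new member becomes the smallest
  // key (-1) and effectively the new leader; getIndex() still reports
  // zero-based positions relative to the smallest key.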
  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if the group contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32             // Insert Position
  //      %add = add i32 %even         // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32               // Def of %odd
  //      store i32 %odd               // Insert Position
  Instruction *InsertPos;
};

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise the analysis is meaningless, since vectorizing
/// interleaved accesses would be unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(nullptr),
        RequiresScalarEpilogue(false) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Return the maximum interleave factor of all interleaved groups.
  unsigned getMaxInterleaveFactor() const {
    unsigned MaxFactor = 1;
    for (auto &Entry : InterleaveGroupMap)
      MaxFactor = std::max(MaxFactor, Entry.second->getFactor());
    return MaxFactor;
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr does not belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }
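  // For example, consider a factor-3 load group where members A[3*i] and
  // A[3*i+1] are present but A[3*i+2] is absent. The generated wide load
  // still reads all three elements per chunk, so on the final vector
  // iteration it could read past the end of A; requiring at least one
  // scalar epilogue iteration keeps the wide load in bounds.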
  /// \brief Initialize the LoopAccessInfo used for dependence checking.
  void setLAI(const LoopAccessInfo *Info) { LAI = Info; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;
  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// Holds dependences among the memory accesses in the loop. It maps a
  /// source access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    StrideDescriptor() = default;

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;
    const SCEV *Scev = nullptr; // The scalar expression of this access.
    uint64_t Size = 0;          // The size of the memory object.
    unsigned Align = 0;         // The alignment of this access.
  };

  /// \brief A type for holding instructions and their stride descriptors.
  typedef std::pair<Instruction *, StrideDescriptor> StrideEntry;

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// \brief Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride) {
    unsigned Factor = std::abs(Stride);
    return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
  }

  /// \brief Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }
  /// \brief Returns true if memory accesses \p A and \p B can be reordered,
  /// if necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return false if reordering
  /// is prevented because \p A and \p B may be dependent.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {

    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
  }
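  // For example, when forming a strided load group, the widened group load is
  // emitted at the first member's position; a later member load is in effect
  // hoisted above any stores between it and the first member, which is only
  // safe if none of those stores may write the memory that member reads.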
  /// \brief Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for them.
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;
  /// Vectorization interleave factor.
  Hint Interleave;
  /// Vectorization forced.
  Hint Force;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
                     OptimizationRemarkEmitter &ORE)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        PotentiallyUnsafe(false), TheLoop(L), ORE(ORE) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized() {
    Width.Value = Interleave.Value = 1;
    Hint Hints[] = {Width, Interleave};
    writeHintsToMetadata(Hints);
  }
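  // In source code, these hints typically originate from pragmas such as
  // (illustrative C/C++):
  //
  //   #pragma clang loop vectorize(enable) vectorize_width(4)
  //   for (int i = 0; i < n; i++) ...
  //
  // which the frontend lowers to "llvm.loop.vectorize.enable" and
  // "llvm.loop.vectorize.width" metadata on the loop.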
  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (getWidth() == 1 && getInterleave() == 1) {
      // FIXME: Add a separate metadata to indicate when the loop has already
      // been vectorized instead of setting width and count to 1.
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      ORE.emit(OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
                                          "AllDisabled", L->getStartLoc(),
                                          L->getHeader())
               << "loop not vectorized: vectorization and interleaving are "
                  "explicitly disabled, or vectorize width and interleave "
                  "count are both set to 1");
      return false;
    }

    return true;
  }

  /// Dumps all the hint information.
  void emitRemarkWithHints() const {
    using namespace ore;
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      ORE.emit(OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
               << "loop not vectorized: vectorization is explicitly disabled");
    else {
      OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
                                 TheLoop->getStartLoc(), TheLoop->getHeader());
      R << "loop not vectorized: use -Rpass-analysis=loop-vectorize for more "
           "info";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=" << NV("Force", true);
        if (Width.Value != 0)
          R << ", Vector Width=" << NV("VectorWidth", Width.Value);
        if (Interleave.Value != 0)
          R << ", Interleave Count=" << NV("InterleaveCount", Interleave.Value);
        R << ")";
      }
      ORE.emit(R);
    }
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }

  /// \brief If hints are provided that force vectorization, use the AlwaysPrint
  /// pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const {
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return OptimizationRemarkAnalysis::AlwaysPrint;
  }

  bool allowReordering() const {
    // When loop hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations given by the scalar loop.
    // This is not enabled by default because it can be unsafe or inefficient.
    // For example, reordering floating-point operations will change the way
    // round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either a MDString or a MDNode with the first
      // operand a MDString.
1385 if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
1386 if (!MD || MD->getNumOperands() == 0)
1387 continue;
1388 S = dyn_cast<MDString>(MD->getOperand(0));
1389 for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
1390 Args.push_back(MD->getOperand(i));
1391 } else {
1392 S = dyn_cast<MDString>(LoopID->getOperand(i));
1393 assert(Args.size() == 0 && "too many arguments for MDString");
1394 }
1395
1396 if (!S)
1397 continue;
1398
1399 // Check if the hint starts with the loop metadata prefix.
1400 StringRef Name = S->getString();
1401 if (Args.size() == 1)
1402 setHint(Name, Args[0]);
1403 }
1404 }
1405
1406 /// Checks a string hint with one operand and sets the value if valid.
1407 void setHint(StringRef Name, Metadata *Arg) {
1408 if (!Name.startswith(Prefix()))
1409 return;
1410 Name = Name.substr(Prefix().size(), StringRef::npos);
1411
1412 const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
1413 if (!C)
1414 return;
1415 unsigned Val = C->getZExtValue();
1416
1417 Hint *Hints[] = {&Width, &Interleave, &Force};
1418 for (auto H : Hints) {
1419 if (Name == H->Name) {
1420 if (H->validate(Val))
1421 H->Value = Val;
1422 else
1423 DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
1424 break;
1425 }
1426 }
1427 }
1428
1429 /// Create a new hint from a name / value pair.
1430 MDNode *createHintMetadata(StringRef Name, unsigned V) const {
1431 LLVMContext &Context = TheLoop->getHeader()->getContext();
1432 Metadata *MDs[] = {MDString::get(Context, Name),
1433 ConstantAsMetadata::get(
1434 ConstantInt::get(Type::getInt32Ty(Context), V))};
1435 return MDNode::get(Context, MDs);
1436 }
1437
1438 /// Matches metadata with hint name.
1439 bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
1440 MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
1441 if (!Name)
1442 return false;
1443
1444 for (auto H : HintTypes)
1445 if (Name->getString().endswith(H.Name))
1446 return true;
1447 return false;
1448 }
1449
1450 /// Sets current hints into loop metadata, keeping other values intact.
1451 void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
1452 if (HintTypes.size() == 0)
1453 return;
1454
1455 // Reserve the first element for the LoopID (see below).
1456 SmallVector<Metadata *, 4> MDs(1);
1457 // If the loop already has metadata, then ignore the existing operands.
1458 MDNode *LoopID = TheLoop->getLoopID();
1459 if (LoopID) {
1460 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
1461 MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
1462 // If the node is in the update list, ignore its old value.
1463 if (!matchesHintMetadataName(Node, HintTypes))
1464 MDs.push_back(Node);
1465 }
1466 }
1467
1468 // Now, add the missing hints.
1469 for (auto H : HintTypes)
1470 MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));
1471
1472 // Replace the current metadata node with the new one.
1473 LLVMContext &Context = TheLoop->getHeader()->getContext();
1474 MDNode *NewLoopID = MDNode::get(Context, MDs);
1475 // Set operand 0 to refer to the loop id itself.
1476 NewLoopID->replaceOperandWith(0, NewLoopID);
1477
1478 TheLoop->setLoopID(NewLoopID);
1479 }
1480
1481 /// The loop these hints belong to.
1482 const Loop *TheLoop;
1483
1484 /// Interface to emit optimization remarks.
1485 OptimizationRemarkEmitter &ORE;
1486 };
1487
1488 static void emitAnalysisDiag(const Loop *TheLoop,
1489 const LoopVectorizeHints &Hints,
1490 OptimizationRemarkEmitter &ORE,
1491 const LoopAccessReport &Message) {
1492 const char *Name = Hints.vectorizeAnalysisPassName();
1493 LoopAccessReport::emitAnalysis(Message, TheLoop, Name, ORE);
1494 }
1495
1496 static void emitMissedWarning(Function *F, Loop *L,
1497 const LoopVectorizeHints &LH,
1498 OptimizationRemarkEmitter *ORE) {
1499 LH.emitRemarkWithHints();
1500
1501 if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
1502 if (LH.getWidth() != 1)
1503 emitLoopVectorizeWarning(
1504 F->getContext(), *F, L->getStartLoc(),
1505 "failed explicitly specified loop vectorization");
1506 else if (LH.getInterleave() != 1)
1507 emitLoopInterleaveWarning(
1508 F->getContext(), *F, L->getStartLoc(),
1509 "failed explicitly specified loop interleaving");
1510 }
1511 }
1512
1513 /// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
1514 /// to what vectorization factor.
1515 /// This class does not look at the profitability of vectorization, only the
1516 /// legality. This class has two main kinds of checks:
1517 /// * Memory checks - The code in canVectorizeMemory checks if vectorization
1518 /// will change the order of memory accesses in a way that will change the
1519 /// correctness of the program.
1520 /// * Scalar checks - The code in canVectorizeInstrs and canVectorizeMemory
1521 /// checks for a number of different conditions, such as the availability of
1522 /// a single induction variable, whether all types are supported and
1523 /// vectorizable, etc. This code reflects the capabilities of InnerLoopVectorizer.
1524 /// This class is also used by InnerLoopVectorizer for identifying the
1525 /// induction variable and the different reduction variables.
1526 class LoopVectorizationLegality {
1527 public:
1528 LoopVectorizationLegality(
1529 Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
1530 TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F,
1531 const TargetTransformInfo *TTI,
1532 std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI,
1533 OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R,
1534 LoopVectorizeHints *H)
1535 : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT),
1536 GetLAA(GetLAA), LAI(nullptr), ORE(ORE), InterleaveInfo(PSE, L, DT, LI),
1537 Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
1538 Requirements(R), Hints(H) {}
1539
1540 /// ReductionList contains the reduction descriptors for all
1541 /// of the reductions that were found in the loop.
1542 typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;
1543
1544 /// InductionList saves induction variables and maps them to the
1545 /// induction descriptor.
1546 typedef MapVector<PHINode *, InductionDescriptor> InductionList;
1547
1548 /// RecurrenceSet contains the phi nodes that are recurrences other than
1549 /// inductions and reductions.
1550 typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet;
1551
1552 /// Returns true if it is legal to vectorize this loop.
1553 /// This does not mean that it is profitable to vectorize this
1554 /// loop, only that it is legal to do so.
1555 bool canVectorize();
1556
1557 /// Returns the induction variable.
1558 PHINode *getInduction() { return Induction; }
1559
1560 /// Returns the reduction variables found in the loop.
1561 ReductionList *getReductionVars() { return &Reductions; }
1562
1563 /// Returns the induction variables found in the loop.
1564 InductionList *getInductionVars() { return &Inductions; }
1565
1566 /// Return the first-order recurrences found in the loop.
1567 RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }
1568
1569 /// Returns the widest induction type.
1570 Type *getWidestInductionType() { return WidestIndTy; }
1571
1572 /// Returns True if V is an induction variable in this loop.
1573 bool isInductionVariable(const Value *V);
1574
1575 /// Returns True if PN is a reduction variable in this loop.
1576 bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }
1577
1578 /// Returns True if Phi is a first-order recurrence in this loop.
1579 bool isFirstOrderRecurrence(const PHINode *Phi);
1580
1581 /// Return true if the block BB needs to be predicated in order for the loop
1582 /// to be vectorized.
1583 bool blockNeedsPredication(BasicBlock *BB);
1584
1585 /// Check if this pointer is consecutive when vectorizing. This happens
1586 /// when the last index of the GEP is the induction variable, or when the
1587 /// pointer itself is an induction variable.
1588 /// This check allows us to vectorize A[idx] into a wide load/store.
1589 /// Returns:
1590 /// 0 - Stride is unknown or non-consecutive.
1591 /// 1 - Address is consecutive.
1592 /// -1 - Address is consecutive, and decreasing.
1593 int isConsecutivePtr(Value *Ptr);
1594
1595 /// Returns true if the value V is uniform within the loop.
1596 bool isUniform(Value *V);
1597
1598 /// Returns true if \p I is known to be uniform after vectorization.
1599 bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); }
1600
1601 /// Returns true if \p I is known to be scalar after vectorization.
1602 bool isScalarAfterVectorization(Instruction *I) { return Scalars.count(I); }
1603
1604 /// Returns the information that we collected about the runtime memory checks.
1605 const RuntimePointerChecking *getRuntimePointerChecking() const {
1606 return LAI->getRuntimePointerChecking();
1607 }
1608
1609 const LoopAccessInfo *getLAI() const { return LAI; }
1610
1611 /// \brief Check if \p Instr belongs to any interleaved access group.
1612 bool isAccessInterleaved(Instruction *Instr) {
1613 return InterleaveInfo.isInterleaved(Instr);
1614 }
1615
1616 /// \brief Return the maximum interleave factor of all interleaved groups.
1617 unsigned getMaxInterleaveFactor() const {
1618 return InterleaveInfo.getMaxInterleaveFactor();
1619 }
1620
1621 /// \brief Get the interleaved access group that \p Instr belongs to.
1622 const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
1623 return InterleaveInfo.getInterleaveGroup(Instr);
1624 }
1625
1626 /// \brief Returns true if an interleaved group requires a scalar iteration
1627 /// to handle accesses with gaps.
1628 bool requiresScalarEpilogue() const {
1629 return InterleaveInfo.requiresScalarEpilogue();
1630 }
1631
1632 unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }
1633
1634 bool hasStride(Value *V) { return LAI->hasStride(V); }
1635
1636 /// Returns true if the target machine supports a masked store operation
1637 /// for the given \p DataType and kind of access to \p Ptr.
1638 bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
1639 return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
1640 }
1641 /// Returns true if the target machine supports a masked load operation
1642 /// for the given \p DataType and kind of access to \p Ptr.
1643 bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
1644 return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
1645 }
1646 /// Returns true if the target machine supports a masked scatter operation
1647 /// for the given \p DataType.
1648 bool isLegalMaskedScatter(Type *DataType) {
1649 return TTI->isLegalMaskedScatter(DataType);
1650 }
1651 /// Returns true if the target machine supports a masked gather operation
1652 /// for the given \p DataType.
1653 bool isLegalMaskedGather(Type *DataType) {
1654 return TTI->isLegalMaskedGather(DataType);
1655 }
1656 /// Returns true if the target machine can represent \p V as a masked gather
1657 /// or scatter operation.
1658 bool isLegalGatherOrScatter(Value *V) {
1659 auto *LI = dyn_cast<LoadInst>(V);
1660 auto *SI = dyn_cast<StoreInst>(V);
1661 if (!LI && !SI)
1662 return false;
1663 auto *Ptr = getPointerOperand(V);
1664 auto *Ty = cast<PointerType>(Ptr->getType())->getElementType();
1665 return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
1666 }
1667
1668 /// Returns true if the vector representation of the instruction \p I
1669 /// requires a mask.
1670 bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
1671 unsigned getNumStores() const { return LAI->getNumStores(); }
1672 unsigned getNumLoads() const { return LAI->getNumLoads(); }
1673 unsigned getNumPredStores() const { return NumPredStores; }
1674
1675 /// Returns true if \p I is an instruction that will be scalarized with
1676 /// predication. Such instructions include conditional stores and
1677 /// instructions that may divide by zero.
1678 bool isScalarWithPredication(Instruction *I);
1679
1680 /// Returns true if \p I is a memory instruction that has a consecutive or
1681 /// consecutive-like pointer operand. Consecutive-like pointers are pointers
1682 /// that are treated like consecutive pointers during vectorization. The
1683 /// pointer operands of interleaved accesses are an example.
1684 bool hasConsecutiveLikePtrOperand(Instruction *I);
1685
1686 /// Returns true if \p I is a memory instruction that must be scalarized
1687 /// during vectorization.
1688 bool memoryInstructionMustBeScalarized(Instruction *I, unsigned VF = 1);
1689
1690 private:
1691 /// Check if a single basic block loop is vectorizable.
1692 /// At this point we know that this is a loop with a constant trip count
1693 /// and we only need to check individual instructions.
1694 bool canVectorizeInstrs();
1695
1696 /// When we vectorize loops we may change the order in which
1697 /// we read and write from memory. This method checks if it is
1698 /// legal to vectorize the code, considering only memory constraints.
1699 /// Returns true if the loop is vectorizable.
1700 bool canVectorizeMemory();
1701
1702 /// Return true if we can vectorize this loop using the IF-conversion
1703 /// transformation.
1704 bool canVectorizeWithIfConvert();
1705
1706 /// Collect the instructions that are uniform after vectorization. An
1707 /// instruction is uniform if we represent it with a single scalar value in
1708 /// the vectorized loop corresponding to each vector iteration. Examples of
1709 /// uniform instructions include pointer operands of consecutive or
1710 /// interleaved memory accesses. Note that although uniformity implies an
1711 /// instruction will be scalar, the reverse is not true. In general, a
1712 /// scalarized instruction will be represented by VF scalar values in the
1713 /// vectorized loop, each corresponding to an iteration of the original
1714 /// scalar loop.
1715 void collectLoopUniforms();
1716
1717 /// Collect the instructions that are scalar after vectorization. An
1718 /// instruction is scalar if it is known to be uniform or will be scalarized
1719 /// during vectorization. Non-uniform scalarized instructions will be
1720 /// represented by VF values in the vectorized loop, each corresponding to an
1721 /// iteration of the original scalar loop.
1722 void collectLoopScalars();
1723
1724 /// Return true if all of the instructions in the block can be speculatively
1725 /// executed. \p SafePtrs is a list of addresses that are known to be legal
1726 /// and from which we know we can read without segfaulting.
1727 bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);
1728
1729 /// Updates the vectorization state by adding \p Phi to the inductions list.
1730 /// This can set \p Phi as the main induction of the loop if \p Phi is a
1731 /// better choice for the main induction than the existing one.
1732 void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
1733 SmallPtrSetImpl<Value *> &AllowedExit);
1734
1735 /// Report an analysis message to assist the user in diagnosing loops that are
1736 /// not vectorized. These are handled as LoopAccessReport rather than
1737 /// VectorizationReport because the << operator of VectorizationReport returns
1738 /// LoopAccessReport.
1739 void emitAnalysis(const LoopAccessReport &Message) const {
1740 emitAnalysisDiag(TheLoop, *Hints, *ORE, Message);
1741 }
1742
1743 /// Create an analysis remark that explains why vectorization failed.
1744 ///
1745 /// \p RemarkName is the identifier for the remark. If \p I is passed it is
1746 /// an instruction that prevents vectorization. Otherwise the loop is used
1747 /// for the location of the remark. \return the remark object that can be
1748 /// streamed to.
1749 OptimizationRemarkAnalysis
1750 createMissedAnalysis(StringRef RemarkName, Instruction *I = nullptr) const {
1751 return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
1752 RemarkName, TheLoop, I);
1753 }
1754
1755 /// \brief If an access has a symbolic stride, this maps the pointer value to
1756 /// the stride symbol.
1757 const ValueToValueMap *getSymbolicStrides() {
1758 // FIXME: Currently, the set of symbolic strides is sometimes queried before
1759 // it's collected. This happens from canVectorizeWithIfConvert, when the
1760 // pointer is checked to reference consecutive elements suitable for a
1761 // masked access.
1762 return LAI ? &LAI->getSymbolicStrides() : nullptr;
1763 }
1764
1765 unsigned NumPredStores;
1766
1767 /// The loop that we evaluate.
1768 Loop *TheLoop;
1769 /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
1770 /// Applies dynamic knowledge to simplify SCEV expressions in the context
1771 /// of existing SCEV assumptions. The analysis will also add a minimal set
1772 /// of new predicates if this is required to enable vectorization and
1773 /// unrolling.
1774 PredicatedScalarEvolution &PSE;
1775 /// Target Library Info.
1776 TargetLibraryInfo *TLI;
1777 /// Target Transform Info.
1778 const TargetTransformInfo *TTI;
1779 /// Dominator Tree.
1780 DominatorTree *DT;
1781 // LoopAccess analysis.
1782 std::function<const LoopAccessInfo &(Loop &)> *GetLAA; 1783 // And the loop-accesses info corresponding to this loop. This pointer is 1784 // null until canVectorizeMemory sets it up. 1785 const LoopAccessInfo *LAI; 1786 /// Interface to emit optimization remarks. 1787 OptimizationRemarkEmitter *ORE; 1788 1789 /// The interleave access information contains groups of interleaved accesses 1790 /// with the same stride and close to each other. 1791 InterleavedAccessInfo InterleaveInfo; 1792 1793 // --- vectorization state --- // 1794 1795 /// Holds the integer induction variable. This is the counter of the 1796 /// loop. 1797 PHINode *Induction; 1798 /// Holds the reduction variables. 1799 ReductionList Reductions; 1800 /// Holds all of the induction variables that we found in the loop. 1801 /// Notice that inductions don't need to start at zero and that induction 1802 /// variables can be pointers. 1803 InductionList Inductions; 1804 /// Holds the phi nodes that are first-order recurrences. 1805 RecurrenceSet FirstOrderRecurrences; 1806 /// Holds the widest induction type encountered. 1807 Type *WidestIndTy; 1808 1809 /// Allowed outside users. This holds the induction and reduction 1810 /// vars which can be accessed from outside the loop. 1811 SmallPtrSet<Value *, 4> AllowedExit; 1812 1813 /// Holds the instructions known to be uniform after vectorization. 1814 SmallPtrSet<Instruction *, 4> Uniforms; 1815 1816 /// Holds the instructions known to be scalar after vectorization. 1817 SmallPtrSet<Instruction *, 4> Scalars; 1818 1819 /// Can we assume the absence of NaNs. 1820 bool HasFunNoNaNAttr; 1821 1822 /// Vectorization requirements that will go through late-evaluation. 1823 LoopVectorizationRequirements *Requirements; 1824 1825 /// Used to emit an analysis of any legality issues. 1826 LoopVectorizeHints *Hints; 1827 1828 /// While vectorizing these instructions we have to generate a 1829 /// call to the appropriate masked intrinsic 1830 SmallPtrSet<const Instruction *, 8> MaskedOp; 1831 }; 1832 1833 /// LoopVectorizationCostModel - estimates the expected speedups due to 1834 /// vectorization. 1835 /// In many cases vectorization is not profitable. This can happen because of 1836 /// a number of reasons. In this class we mainly attempt to predict the 1837 /// expected speedup/slowdowns due to the supported instruction set. We use the 1838 /// TargetTransformInfo to query the different backends for the cost of 1839 /// different operations. 1840 class LoopVectorizationCostModel { 1841 public: 1842 LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE, 1843 LoopInfo *LI, LoopVectorizationLegality *Legal, 1844 const TargetTransformInfo &TTI, 1845 const TargetLibraryInfo *TLI, DemandedBits *DB, 1846 AssumptionCache *AC, 1847 OptimizationRemarkEmitter *ORE, const Function *F, 1848 const LoopVectorizeHints *Hints) 1849 : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB), 1850 AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {} 1851 1852 /// Information about vectorization costs 1853 struct VectorizationFactor { 1854 unsigned Width; // Vector width with best cost 1855 unsigned Cost; // Cost of the loop with that width 1856 }; 1857 /// \return The most profitable vectorization factor and the cost of that VF. 1858 /// This method checks every power of two up to VF. If UserVF is not ZERO 1859 /// then this vectorization factor will be selected if vectorization is 1860 /// possible. 
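/// For example (an illustration, not taken from the source): on a target
/// with 256-bit vector registers and a loop whose narrowest type is i32,
/// the candidate widths are the powers of two from 1 to 8, and the width
/// with the lowest cost per scalar iteration is returned, assuming no
/// UserVF was given and -vectorizer-maximize-bandwidth is off.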
1861 VectorizationFactor selectVectorizationFactor(bool OptForSize);
1862
1863 /// \return The size (in bits) of the smallest and widest types in the code
1864 /// that needs to be vectorized. We ignore values that remain scalar such as
1865 /// 64 bit loop indices.
1866 std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1867
1868 /// \return The desired interleave count.
1869 /// If interleave count has been specified by metadata it will be returned.
1870 /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1871 /// are the selected vectorization factor and the cost of the selected VF.
1872 unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
1873 unsigned LoopCost);
1874
1875 /// \return The most profitable interleave (unroll) factor.
1876 /// This method finds the best interleave count based on register pressure
1877 /// and other parameters. VF and LoopCost are the selected vectorization
1878 /// factor and the cost of the selected VF.
1879 unsigned computeInterleaveCount(bool OptForSize, unsigned VF,
1880 unsigned LoopCost);
1881
1882 /// \brief A struct that represents some properties of the register usage
1883 /// of a loop.
1884 struct RegisterUsage {
1885 /// Holds the number of loop invariant values that are used in the loop.
1886 unsigned LoopInvariantRegs;
1887 /// Holds the maximum number of concurrent live intervals in the loop.
1888 unsigned MaxLocalUsers;
1889 /// Holds the number of instructions in the loop.
1890 unsigned NumInstructions;
1891 };
1892
1893 /// \return Returns information about the register usage of the loop for the
1894 /// given vectorization factors.
1895 SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);
1896
1897 /// Collect values we want to ignore in the cost model.
1898 void collectValuesToIgnore();
1899
1900 /// \returns The smallest bitwidth each instruction can be represented with.
1901 /// The vector equivalents of these instructions should be truncated to this
1902 /// type.
1903 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1904 return MinBWs;
1905 }
1906
1907 private:
1908 /// The vectorization cost is a combination of the cost itself and a boolean
1909 /// indicating whether any of the contributing operations will actually
1910 /// operate on vector values after type legalization in the backend. If this
1911 /// latter value is false, then all operations will be scalarized (i.e. no
1912 /// vectorization has actually taken place).
1915 typedef std::pair<unsigned, bool> VectorizationCostTy;
1916
1917 /// Returns the expected execution cost. The unit of the cost does
1918 /// not matter because we use the 'cost' units to compare different
1919 /// vector widths. The cost that is returned is *not* normalized by
1920 /// the factor width.
1921 VectorizationCostTy expectedCost(unsigned VF);
1922
1923 /// Returns the execution time cost of an instruction for a given vector
1924 /// width. A vector width of one means scalar.
1925 VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
1926
1927 /// The cost-computation logic from getInstructionCost which provides
1928 /// the vector type as an output parameter.
1929 unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
1930
1931 /// Returns whether the instruction is a load or store and will be emitted
1932 /// as a vector operation.
1933 bool isConsecutiveLoadOrStore(Instruction *I);
1934
1935 /// Create an analysis remark that explains why vectorization failed.
1936 ///
1937 /// \p RemarkName is the identifier for the remark. \return the remark object
1938 /// that can be streamed to.
1939 OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
1940 return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
1941 RemarkName, TheLoop);
1942 }
1943
1944 /// Map of scalar integer values to the smallest bitwidth they can be legally
1945 /// represented as. The vector equivalents of these values should be truncated
1946 /// to this type.
1947 MapVector<Instruction *, uint64_t> MinBWs;
1948
1949 public:
1950 /// The loop that we evaluate.
1951 Loop *TheLoop;
1952 /// Predicated scalar evolution analysis.
1953 PredicatedScalarEvolution &PSE;
1954 /// Loop Info analysis.
1955 LoopInfo *LI;
1956 /// Vectorization legality.
1957 LoopVectorizationLegality *Legal;
1958 /// Vector target information.
1959 const TargetTransformInfo &TTI;
1960 /// Target Library Info.
1961 const TargetLibraryInfo *TLI;
1962 /// Demanded bits analysis.
1963 DemandedBits *DB;
1964 /// Assumption cache.
1965 AssumptionCache *AC;
1966 /// Interface to emit optimization remarks.
1967 OptimizationRemarkEmitter *ORE;
1968
1969 const Function *TheFunction;
1970 /// Loop Vectorize Hint.
1971 const LoopVectorizeHints *Hints;
1972 /// Values to ignore in the cost model.
1973 SmallPtrSet<const Value *, 16> ValuesToIgnore;
1974 /// Values to ignore in the cost model when VF > 1.
1975 SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1976 };
1977
1978 /// \brief This holds vectorization requirements that must be verified late in
1979 /// the process. The requirements are set by the legality analysis and the cost
1980 /// model. Once vectorization has been determined to be possible and profitable,
1981 /// the requirements can be verified by looking for metadata or compiler options.
1982 /// For example, some loops require FP commutativity, which is only allowed if
1983 /// vectorization is explicitly specified or if the fast-math compiler option
1984 /// has been provided.
1985 /// Late evaluation of these requirements allows helpful diagnostics to be
1986 /// composed that tell the user what needs to be done to vectorize the loop, for
1987 /// example, by specifying #pragma clang loop vectorize or -ffast-math. Late
1988 /// evaluation should be used only when diagnostics can be generated that can be
1989 /// followed by a non-expert user.
1990 class LoopVectorizationRequirements {
1991 public:
1992 LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE)
1993 : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr), ORE(ORE) {}
1994
1995 void addUnsafeAlgebraInst(Instruction *I) {
1996 // First unsafe algebra instruction.
1997 if (!UnsafeAlgebraInst)
1998 UnsafeAlgebraInst = I;
1999 }
2000
2001 void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }
2002
2003 bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
2004 const char *PassName = Hints.vectorizeAnalysisPassName();
2005 bool Failed = false;
2006 if (UnsafeAlgebraInst && !Hints.allowReordering()) {
2007 ORE.emit(
2008 OptimizationRemarkAnalysisFPCommute(PassName, "CantReorderFPOps",
2009 UnsafeAlgebraInst->getDebugLoc(),
2010 UnsafeAlgebraInst->getParent())
2011 << "loop not vectorized: cannot prove it is safe to reorder "
2012 "floating-point operations");
2013 Failed = true;
2014 }
2015
2016 // Test if runtime memcheck thresholds are exceeded.
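// For a sense of scale: at the time of writing the default thresholds are 8
// runtime pointer checks (VectorizerParams::RuntimeMemoryCheckThreshold) and
// 128 when vectorization is forced by a pragma
// (PragmaVectorizeMemoryCheckThreshold). A loop needing, say, 9 checks is
// therefore rejected below unless the hints allow reordering.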
2017 bool PragmaThresholdReached = 2018 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 2019 bool ThresholdReached = 2020 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 2021 if ((ThresholdReached && !Hints.allowReordering()) || 2022 PragmaThresholdReached) { 2023 ORE.emit(OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps", 2024 L->getStartLoc(), 2025 L->getHeader()) 2026 << "loop not vectorized: cannot prove it is safe to reorder " 2027 "memory operations"); 2028 DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 2029 Failed = true; 2030 } 2031 2032 return Failed; 2033 } 2034 2035 private: 2036 unsigned NumRuntimePointerChecks; 2037 Instruction *UnsafeAlgebraInst; 2038 2039 /// Interface to emit optimization remarks. 2040 OptimizationRemarkEmitter &ORE; 2041 }; 2042 2043 static void addAcyclicInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) { 2044 if (L.empty()) { 2045 if (!hasCyclesInLoopBody(L)) 2046 V.push_back(&L); 2047 return; 2048 } 2049 for (Loop *InnerL : L) 2050 addAcyclicInnerLoop(*InnerL, V); 2051 } 2052 2053 /// The LoopVectorize Pass. 2054 struct LoopVectorize : public FunctionPass { 2055 /// Pass identification, replacement for typeid 2056 static char ID; 2057 2058 explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true) 2059 : FunctionPass(ID) { 2060 Impl.DisableUnrolling = NoUnrolling; 2061 Impl.AlwaysVectorize = AlwaysVectorize; 2062 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2063 } 2064 2065 LoopVectorizePass Impl; 2066 2067 bool runOnFunction(Function &F) override { 2068 if (skipFunction(F)) 2069 return false; 2070 2071 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2072 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2073 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2074 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2075 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2076 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2077 auto *TLI = TLIP ? 
&TLIP->getTLI() : nullptr; 2078 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2079 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2080 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2081 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2082 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2083 2084 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2085 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2086 2087 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2088 GetLAA, *ORE); 2089 } 2090 2091 void getAnalysisUsage(AnalysisUsage &AU) const override { 2092 AU.addRequired<AssumptionCacheTracker>(); 2093 AU.addRequiredID(LoopSimplifyID); 2094 AU.addRequiredID(LCSSAID); 2095 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2096 AU.addRequired<DominatorTreeWrapperPass>(); 2097 AU.addRequired<LoopInfoWrapperPass>(); 2098 AU.addRequired<ScalarEvolutionWrapperPass>(); 2099 AU.addRequired<TargetTransformInfoWrapperPass>(); 2100 AU.addRequired<AAResultsWrapperPass>(); 2101 AU.addRequired<LoopAccessLegacyAnalysis>(); 2102 AU.addRequired<DemandedBitsWrapperPass>(); 2103 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2104 AU.addPreserved<LoopInfoWrapperPass>(); 2105 AU.addPreserved<DominatorTreeWrapperPass>(); 2106 AU.addPreserved<BasicAAWrapperPass>(); 2107 AU.addPreserved<GlobalsAAWrapperPass>(); 2108 } 2109 }; 2110 2111 } // end anonymous namespace 2112 2113 //===----------------------------------------------------------------------===// 2114 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2115 // LoopVectorizationCostModel. 2116 //===----------------------------------------------------------------------===// 2117 2118 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2119 // We need to place the broadcast of invariant variables outside the loop. 2120 Instruction *Instr = dyn_cast<Instruction>(V); 2121 bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody); 2122 bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr; 2123 2124 // Place the code for broadcasting invariant variables in the new preheader. 2125 IRBuilder<>::InsertPointGuard Guard(Builder); 2126 if (Invariant) 2127 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2128 2129 // Broadcast the scalar into all locations in the vector. 
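// For reference, CreateVectorSplat expands to an insertelement into lane
// zero followed by a shufflevector with an all-zero mask, e.g. for VF = 4
// (illustrative IR):
//   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %v, i32 0
//   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
//                                    <4 x i32> undef, <4 x i32> zeroinitializer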
2130 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2131 2132 return Shuf; 2133 } 2134 2135 void InnerLoopVectorizer::createVectorIntInductionPHI( 2136 const InductionDescriptor &II, Instruction *EntryVal) { 2137 Value *Start = II.getStartValue(); 2138 ConstantInt *Step = II.getConstIntStepValue(); 2139 assert(Step && "Can not widen an IV with a non-constant step"); 2140 2141 // Construct the initial value of the vector IV in the vector loop preheader 2142 auto CurrIP = Builder.saveIP(); 2143 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2144 if (isa<TruncInst>(EntryVal)) { 2145 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2146 Step = ConstantInt::getSigned(TruncType, Step->getSExtValue()); 2147 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2148 } 2149 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2150 Value *SteppedStart = getStepVector(SplatStart, 0, Step); 2151 Builder.restoreIP(CurrIP); 2152 2153 Value *SplatVF = 2154 ConstantVector::getSplat(VF, ConstantInt::getSigned(Start->getType(), 2155 VF * Step->getSExtValue())); 2156 // We may need to add the step a number of times, depending on the unroll 2157 // factor. The last of those goes into the PHI. 2158 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2159 &*LoopVectorBody->getFirstInsertionPt()); 2160 Instruction *LastInduction = VecInd; 2161 VectorParts Entry(UF); 2162 for (unsigned Part = 0; Part < UF; ++Part) { 2163 Entry[Part] = LastInduction; 2164 LastInduction = cast<Instruction>( 2165 Builder.CreateAdd(LastInduction, SplatVF, "step.add")); 2166 } 2167 VectorLoopValueMap.initVector(EntryVal, Entry); 2168 if (isa<TruncInst>(EntryVal)) 2169 addMetadata(Entry, EntryVal); 2170 2171 // Move the last step to the end of the latch block. This ensures consistent 2172 // placement of all induction updates. 2173 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2174 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2175 auto *ICmp = cast<Instruction>(Br->getCondition()); 2176 LastInduction->moveBefore(ICmp); 2177 LastInduction->setName("vec.ind.next"); 2178 2179 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2180 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2181 } 2182 2183 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2184 if (Legal->isScalarAfterVectorization(IV)) 2185 return true; 2186 auto isScalarInst = [&](User *U) -> bool { 2187 auto *I = cast<Instruction>(U); 2188 return (OrigLoop->contains(I) && Legal->isScalarAfterVectorization(I)); 2189 }; 2190 return any_of(IV->users(), isScalarInst); 2191 } 2192 2193 void InnerLoopVectorizer::widenIntInduction(PHINode *IV, TruncInst *Trunc) { 2194 2195 auto II = Legal->getInductionVars()->find(IV); 2196 assert(II != Legal->getInductionVars()->end() && "IV is not an induction"); 2197 2198 auto ID = II->second; 2199 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2200 2201 // The scalar value to broadcast. This will be derived from the canonical 2202 // induction variable. 2203 Value *ScalarIV = nullptr; 2204 2205 // The step of the induction. 2206 Value *Step = nullptr; 2207 2208 // The value from the original loop to which we are mapping the new induction 2209 // variable. 2210 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2211 2212 // True if we have vectorized the induction variable. 
2213 auto VectorizedIV = false; 2214 2215 // Determine if we want a scalar version of the induction variable. This is 2216 // true if the induction variable itself is not widened, or if it has at 2217 // least one user in the loop that is not widened. 2218 auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal); 2219 2220 // If the induction variable has a constant integer step value, go ahead and 2221 // get it now. 2222 if (ID.getConstIntStepValue()) 2223 Step = ID.getConstIntStepValue(); 2224 2225 // Try to create a new independent vector induction variable. If we can't 2226 // create the phi node, we will splat the scalar induction variable in each 2227 // loop iteration. 2228 if (VF > 1 && IV->getType() == Induction->getType() && Step && 2229 !Legal->isScalarAfterVectorization(EntryVal)) { 2230 createVectorIntInductionPHI(ID, EntryVal); 2231 VectorizedIV = true; 2232 } 2233 2234 // If we haven't yet vectorized the induction variable, or if we will create 2235 // a scalar one, we need to define the scalar induction variable and step 2236 // values. If we were given a truncation type, truncate the canonical 2237 // induction variable and constant step. Otherwise, derive these values from 2238 // the induction descriptor. 2239 if (!VectorizedIV || NeedsScalarIV) { 2240 if (Trunc) { 2241 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2242 assert(Step && "Truncation requires constant integer step"); 2243 auto StepInt = cast<ConstantInt>(Step)->getSExtValue(); 2244 ScalarIV = Builder.CreateCast(Instruction::Trunc, Induction, TruncType); 2245 Step = ConstantInt::getSigned(TruncType, StepInt); 2246 } else { 2247 ScalarIV = Induction; 2248 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2249 if (IV != OldInduction) { 2250 ScalarIV = Builder.CreateSExtOrTrunc(ScalarIV, IV->getType()); 2251 ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL); 2252 ScalarIV->setName("offset.idx"); 2253 } 2254 if (!Step) { 2255 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2256 Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(), 2257 &*Builder.GetInsertPoint()); 2258 } 2259 } 2260 } 2261 2262 // If we haven't yet vectorized the induction variable, splat the scalar 2263 // induction variable, and build the necessary step vectors. 2264 if (!VectorizedIV) { 2265 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2266 VectorParts Entry(UF); 2267 for (unsigned Part = 0; Part < UF; ++Part) 2268 Entry[Part] = getStepVector(Broadcasted, VF * Part, Step); 2269 VectorLoopValueMap.initVector(EntryVal, Entry); 2270 if (Trunc) 2271 addMetadata(Entry, Trunc); 2272 } 2273 2274 // If an induction variable is only used for counting loop iterations or 2275 // calculating addresses, it doesn't need to be widened. Create scalar steps 2276 // that can be used by instructions we will later scalarize. Note that the 2277 // addition of the scalar steps will not increase the number of instructions 2278 // in the loop in the common case prior to InstCombine. We will be trading 2279 // one vector extract for each scalar step. 2280 if (NeedsScalarIV) 2281 buildScalarSteps(ScalarIV, Step, EntryVal); 2282 } 2283 2284 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2285 Instruction::BinaryOps BinOp) { 2286 // Create and check the types. 
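// As an example of what this function produces: for VF = 4, lane L of the
// result holds Val[L] + (StartIdx + L) * Step, so with StartIdx = 0 and an
// integer step %s this is conceptually Val + <0, 1, 2, 3> * %s.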
2287 assert(Val->getType()->isVectorTy() && "Must be a vector"); 2288 int VLen = Val->getType()->getVectorNumElements(); 2289 2290 Type *STy = Val->getType()->getScalarType(); 2291 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2292 "Induction Step must be an integer or FP"); 2293 assert(Step->getType() == STy && "Step has wrong type"); 2294 2295 SmallVector<Constant *, 8> Indices; 2296 2297 if (STy->isIntegerTy()) { 2298 // Create a vector of consecutive numbers from zero to VF. 2299 for (int i = 0; i < VLen; ++i) 2300 Indices.push_back(ConstantInt::get(STy, StartIdx + i)); 2301 2302 // Add the consecutive indices to the vector value. 2303 Constant *Cv = ConstantVector::get(Indices); 2304 assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); 2305 Step = Builder.CreateVectorSplat(VLen, Step); 2306 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2307 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2308 // which can be found from the original scalar operations. 2309 Step = Builder.CreateMul(Cv, Step); 2310 return Builder.CreateAdd(Val, Step, "induction"); 2311 } 2312 2313 // Floating point induction. 2314 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2315 "Binary Opcode should be specified for FP induction"); 2316 // Create a vector of consecutive numbers from zero to VF. 2317 for (int i = 0; i < VLen; ++i) 2318 Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i))); 2319 2320 // Add the consecutive indices to the vector value. 2321 Constant *Cv = ConstantVector::get(Indices); 2322 2323 Step = Builder.CreateVectorSplat(VLen, Step); 2324 2325 // Floating point operations had to be 'fast' to enable the induction. 2326 FastMathFlags Flags; 2327 Flags.setUnsafeAlgebra(); 2328 2329 Value *MulOp = Builder.CreateFMul(Cv, Step); 2330 if (isa<Instruction>(MulOp)) 2331 // Have to check, MulOp may be a constant 2332 cast<Instruction>(MulOp)->setFastMathFlags(Flags); 2333 2334 Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2335 if (isa<Instruction>(BOp)) 2336 cast<Instruction>(BOp)->setFastMathFlags(Flags); 2337 return BOp; 2338 } 2339 2340 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2341 Value *EntryVal) { 2342 2343 // We shouldn't have to build scalar steps if we aren't vectorizing. 2344 assert(VF > 1 && "VF should be greater than one"); 2345 2346 // Get the value type and ensure it and the step have the same integer type. 2347 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2348 assert(ScalarIVTy->isIntegerTy() && ScalarIVTy == Step->getType() && 2349 "Val and Step should have the same integer type"); 2350 2351 // Determine the number of scalars we need to generate for each unroll 2352 // iteration. If EntryVal is uniform, we only need to generate the first 2353 // lane. Otherwise, we generate all VF values. 2354 unsigned Lanes = 2355 Legal->isUniformAfterVectorization(cast<Instruction>(EntryVal)) ? 1 : VF; 2356 2357 // Compute the scalar steps and save the results in VectorLoopValueMap. 
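// E.g. for VF = 4, UF = 2 and a unit step, the scalars generated for a
// non-uniform EntryVal are (illustrative names):
//   Part 0: %iv + 0, %iv + 1, %iv + 2, %iv + 3
//   Part 1: %iv + 4, %iv + 5, %iv + 6, %iv + 7
// For a uniform EntryVal only the first lane of each part is created.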
2358 ScalarParts Entry(UF); 2359 for (unsigned Part = 0; Part < UF; ++Part) { 2360 Entry[Part].resize(VF); 2361 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2362 auto *StartIdx = ConstantInt::get(ScalarIVTy, VF * Part + Lane); 2363 auto *Mul = Builder.CreateMul(StartIdx, Step); 2364 auto *Add = Builder.CreateAdd(ScalarIV, Mul); 2365 Entry[Part][Lane] = Add; 2366 } 2367 } 2368 VectorLoopValueMap.initScalar(EntryVal, Entry); 2369 } 2370 2371 int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) { 2372 2373 const ValueToValueMap &Strides = getSymbolicStrides() ? *getSymbolicStrides() : 2374 ValueToValueMap(); 2375 2376 int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, true, false); 2377 if (Stride == 1 || Stride == -1) 2378 return Stride; 2379 return 0; 2380 } 2381 2382 bool LoopVectorizationLegality::isUniform(Value *V) { 2383 return LAI->isUniform(V); 2384 } 2385 2386 const InnerLoopVectorizer::VectorParts & 2387 InnerLoopVectorizer::getVectorValue(Value *V) { 2388 assert(V != Induction && "The new induction variable should not be used."); 2389 assert(!V->getType()->isVectorTy() && "Can't widen a vector"); 2390 assert(!V->getType()->isVoidTy() && "Type does not produce a value"); 2391 2392 // If we have a stride that is replaced by one, do it here. 2393 if (Legal->hasStride(V)) 2394 V = ConstantInt::get(V->getType(), 1); 2395 2396 // If we have this scalar in the map, return it. 2397 if (VectorLoopValueMap.hasVector(V)) 2398 return VectorLoopValueMap.VectorMapStorage[V]; 2399 2400 // If the value has not been vectorized, check if it has been scalarized 2401 // instead. If it has been scalarized, and we actually need the value in 2402 // vector form, we will construct the vector values on demand. 2403 if (VectorLoopValueMap.hasScalar(V)) { 2404 2405 // Initialize a new vector map entry. 2406 VectorParts Entry(UF); 2407 2408 // If we've scalarized a value, that value should be an instruction. 2409 auto *I = cast<Instruction>(V); 2410 2411 // If we aren't vectorizing, we can just copy the scalar map values over to 2412 // the vector map. 2413 if (VF == 1) { 2414 for (unsigned Part = 0; Part < UF; ++Part) 2415 Entry[Part] = getScalarValue(V, Part, 0); 2416 return VectorLoopValueMap.initVector(V, Entry); 2417 } 2418 2419 // Get the last scalar instruction we generated for V. If the value is 2420 // known to be uniform after vectorization, this corresponds to lane zero 2421 // of the last unroll iteration. Otherwise, the last instruction is the one 2422 // we created for the last vector lane of the last unroll iteration. 2423 unsigned LastLane = Legal->isUniformAfterVectorization(I) ? 0 : VF - 1; 2424 auto *LastInst = cast<Instruction>(getScalarValue(V, UF - 1, LastLane)); 2425 2426 // Set the insert point after the last scalarized instruction. This ensures 2427 // the insertelement sequence will directly follow the scalar definitions. 2428 auto OldIP = Builder.saveIP(); 2429 auto NewIP = std::next(BasicBlock::iterator(LastInst)); 2430 Builder.SetInsertPoint(&*NewIP); 2431 2432 // However, if we are vectorizing, we need to construct the vector values. 2433 // If the value is known to be uniform after vectorization, we can just 2434 // broadcast the scalar value corresponding to lane zero for each unroll 2435 // iteration. Otherwise, we construct the vector values using insertelement 2436 // instructions. Since the resulting vectors are stored in 2437 // VectorLoopValueMap, we will only generate the insertelements once. 
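// E.g. for VF = 4, a scalarized i32 value %x is reassembled for each unroll
// part along these lines (illustrative IR):
//   %v0 = insertelement <4 x i32> undef, i32 %x.0, i32 0
//   %v1 = insertelement <4 x i32> %v0, i32 %x.1, i32 1
//   %v2 = insertelement <4 x i32> %v1, i32 %x.2, i32 2
//   %v3 = insertelement <4 x i32> %v2, i32 %x.3, i32 3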
2438 for (unsigned Part = 0; Part < UF; ++Part) { 2439 Value *VectorValue = nullptr; 2440 if (Legal->isUniformAfterVectorization(I)) { 2441 VectorValue = getBroadcastInstrs(getScalarValue(V, Part, 0)); 2442 } else { 2443 VectorValue = UndefValue::get(VectorType::get(V->getType(), VF)); 2444 for (unsigned Lane = 0; Lane < VF; ++Lane) 2445 VectorValue = Builder.CreateInsertElement( 2446 VectorValue, getScalarValue(V, Part, Lane), 2447 Builder.getInt32(Lane)); 2448 } 2449 Entry[Part] = VectorValue; 2450 } 2451 Builder.restoreIP(OldIP); 2452 return VectorLoopValueMap.initVector(V, Entry); 2453 } 2454 2455 // If this scalar is unknown, assume that it is a constant or that it is 2456 // loop invariant. Broadcast V and save the value for future uses. 2457 Value *B = getBroadcastInstrs(V); 2458 return VectorLoopValueMap.initVector(V, VectorParts(UF, B)); 2459 } 2460 2461 Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part, 2462 unsigned Lane) { 2463 2464 // If the value is not an instruction contained in the loop, it should 2465 // already be scalar. 2466 if (OrigLoop->isLoopInvariant(V)) 2467 return V; 2468 2469 assert(Lane > 0 ? !Legal->isUniformAfterVectorization(cast<Instruction>(V)) 2470 : true && "Uniform values only have lane zero"); 2471 2472 // If the value from the original loop has not been vectorized, it is 2473 // represented by UF x VF scalar values in the new loop. Return the requested 2474 // scalar value. 2475 if (VectorLoopValueMap.hasScalar(V)) 2476 return VectorLoopValueMap.ScalarMapStorage[V][Part][Lane]; 2477 2478 // If the value has not been scalarized, get its entry in VectorLoopValueMap 2479 // for the given unroll part. If this entry is not a vector type (i.e., the 2480 // vectorization factor is one), there is no need to generate an 2481 // extractelement instruction. 2482 auto *U = getVectorValue(V)[Part]; 2483 if (!U->getType()->isVectorTy()) { 2484 assert(VF == 1 && "Value not scalarized has non-vector type"); 2485 return U; 2486 } 2487 2488 // Otherwise, the value from the original loop has been vectorized and is 2489 // represented by UF vector values. Extract and return the requested scalar 2490 // value from the appropriate vector lane. 2491 return Builder.CreateExtractElement(U, Builder.getInt32(Lane)); 2492 } 2493 2494 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2495 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2496 SmallVector<Constant *, 8> ShuffleMask; 2497 for (unsigned i = 0; i < VF; ++i) 2498 ShuffleMask.push_back(Builder.getInt32(VF - i - 1)); 2499 2500 return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()), 2501 ConstantVector::get(ShuffleMask), 2502 "reverse"); 2503 } 2504 2505 // Get a mask to interleave \p NumVec vectors into a wide vector. 2506 // I.e. <0, VF, VF*2, ..., VF*(NumVec-1), 1, VF+1, VF*2+1, ...> 2507 // E.g. For 2 interleaved vectors, if VF is 4, the mask is: 2508 // <0, 4, 1, 5, 2, 6, 3, 7> 2509 static Constant *getInterleavedMask(IRBuilder<> &Builder, unsigned VF, 2510 unsigned NumVec) { 2511 SmallVector<Constant *, 16> Mask; 2512 for (unsigned i = 0; i < VF; i++) 2513 for (unsigned j = 0; j < NumVec; j++) 2514 Mask.push_back(Builder.getInt32(j * VF + i)); 2515 2516 return ConstantVector::get(Mask); 2517 } 2518 2519 // Get the strided mask starting from index \p Start. 2520 // I.e. 
<Start, Start + Stride, ..., Start + Stride*(VF-1)>
2521 static Constant *getStridedMask(IRBuilder<> &Builder, unsigned Start,
2522 unsigned Stride, unsigned VF) {
2523 SmallVector<Constant *, 16> Mask;
2524 for (unsigned i = 0; i < VF; i++)
2525 Mask.push_back(Builder.getInt32(Start + i * Stride));
2526
2527 return ConstantVector::get(Mask);
2528 }
2529
2530 // Get a mask of two parts: The first part consists of sequential integers
2531 // starting from 0; the second part consists of UNDEFs.
2532 // I.e. <0, 1, 2, ..., NumInt - 1, undef, ..., undef>
2533 static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned NumInt,
2534 unsigned NumUndef) {
2535 SmallVector<Constant *, 16> Mask;
2536 for (unsigned i = 0; i < NumInt; i++)
2537 Mask.push_back(Builder.getInt32(i));
2538
2539 Constant *Undef = UndefValue::get(Builder.getInt32Ty());
2540 for (unsigned i = 0; i < NumUndef; i++)
2541 Mask.push_back(Undef);
2542
2543 return ConstantVector::get(Mask);
2544 }
2545
2546 // Concatenate two vectors with the same element type. The 2nd vector should
2547 // not have more elements than the 1st vector. If the 2nd vector has fewer
2548 // elements, extend it with UNDEFs.
2549 static Value *ConcatenateTwoVectors(IRBuilder<> &Builder, Value *V1,
2550 Value *V2) {
2551 VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
2552 VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
2553 assert(VecTy1 && VecTy2 &&
2554 VecTy1->getScalarType() == VecTy2->getScalarType() &&
2555 "Expect two vectors with the same element type");
2556
2557 unsigned NumElts1 = VecTy1->getNumElements();
2558 unsigned NumElts2 = VecTy2->getNumElements();
2559 assert(NumElts1 >= NumElts2 && "The first vector should not have fewer elements");
2560
2561 if (NumElts1 > NumElts2) {
2562 // Extend with UNDEFs.
2563 Constant *ExtMask =
2564 getSequentialMask(Builder, NumElts2, NumElts1 - NumElts2);
2565 V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask);
2566 }
2567
2568 Constant *Mask = getSequentialMask(Builder, NumElts1 + NumElts2, 0);
2569 return Builder.CreateShuffleVector(V1, V2, Mask);
2570 }
2571
2572 // Concatenate vectors in the given list. All vectors have the same type.
2573 static Value *ConcatenateVectors(IRBuilder<> &Builder,
2574 ArrayRef<Value *> InputList) {
2575 unsigned NumVec = InputList.size();
2576 assert(NumVec > 1 && "Should be at least two vectors");
2577
2578 SmallVector<Value *, 8> ResList;
2579 ResList.append(InputList.begin(), InputList.end());
2580 do {
2581 SmallVector<Value *, 8> TmpList;
2582 for (unsigned i = 0; i < NumVec - 1; i += 2) {
2583 Value *V0 = ResList[i], *V1 = ResList[i + 1];
2584 assert((V0->getType() == V1->getType() || i == NumVec - 2) &&
2585 "Only the last vector may have a different type");
2586
2587 TmpList.push_back(ConcatenateTwoVectors(Builder, V0, V1));
2588 }
2589
2590 // Push the last vector if the total number of vectors is odd.
2591 if (NumVec % 2 != 0)
2592 TmpList.push_back(ResList[NumVec - 1]);
2593
2594 ResList = TmpList;
2595 NumVec = ResList.size();
2596 } while (NumVec > 1);
2597
2598 return ResList[0];
2599 }
2600
2601 // Try to vectorize the interleave group that \p Instr belongs to.
2602 //
2603 // E.g. Translate the following interleaved load group (factor = 3):
2604 // for (i = 0; i < N; i+=3) {
2605 // R = Pic[i]; // Member of index 0
2606 // G = Pic[i+1]; // Member of index 1
2607 // B = Pic[i+2]; // Member of index 2
2608 // ... do something to R, G, B
2609 // }
2610 // To:
2611 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
2612 // %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements
2613 // %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements
2614 // %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements
2615 //
2616 // Or translate the following interleaved store group (factor = 3):
2617 // for (i = 0; i < N; i+=3) {
2618 // ... do something to R, G, B
2619 // Pic[i] = R; // Member of index 0
2620 // Pic[i+1] = G; // Member of index 1
2621 // Pic[i+2] = B; // Member of index 2
2622 // }
2623 // To:
2624 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2625 // %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
2626 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2627 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
2628 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
2629 void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
2630 const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
2631 assert(Group && "Failed to get an interleaved access group.");
2632
2633 // Skip if the current instruction is not the insert position.
2634 if (Instr != Group->getInsertPos())
2635 return;
2636
2637 LoadInst *LI = dyn_cast<LoadInst>(Instr);
2638 StoreInst *SI = dyn_cast<StoreInst>(Instr);
2639 Value *Ptr = getPointerOperand(Instr);
2640
2641 // Prepare for the vector type of the interleaved load/store.
2642 Type *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
2643 unsigned InterleaveFactor = Group->getFactor();
2644 Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
2645 Type *PtrTy = VecTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
2646
2647 // Prepare for the new pointers.
2648 setDebugLocFromInst(Builder, Ptr);
2649 SmallVector<Value *, 2> NewPtrs;
2650 unsigned Index = Group->getIndex(Instr);
2651
2652 // If the group is reverse, adjust the index to refer to the last vector lane
2653 // instead of the first. We adjust the index from the first vector lane,
2654 // rather than directly getting the pointer for lane VF - 1, because the
2655 // pointer operand of the interleaved access is supposed to be uniform. For
2656 // uniform instructions, we're only required to generate a value for the
2657 // first vector lane in each unroll iteration.
2658 if (Group->isReverse())
2659 Index += (VF - 1) * Group->getFactor();
2660
2661 for (unsigned Part = 0; Part < UF; Part++) {
2662 Value *NewPtr = getScalarValue(Ptr, Part, 0);
2663
2664 // Note that the current instruction could be at any index. We need to
2665 // adjust the address to that of the member at index 0.
2666 //
2667 // E.g. a = A[i+1]; // Member of index 1 (Current instruction)
2668 // b = A[i]; // Member of index 0
2669 // The current pointer points to A[i+1]; adjust it to A[i].
2670 //
2671 // E.g. A[i+1] = a; // Member of index 1
2672 // A[i] = b; // Member of index 0
2673 // A[i+2] = c; // Member of index 2 (Current instruction)
2674 // The current pointer points to A[i+2]; adjust it to A[i].
2675 NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
2676
2677 // Cast to the vector pointer type.
2678 NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
2679 }
2680
2681 setDebugLocFromInst(Builder, Instr);
2682 Value *UndefVec = UndefValue::get(VecTy);
2683
2684 // Vectorize the interleaved load group.
2685 if (LI) {
2686
2687 // For each unroll part, create a wide load for the group.
2688 SmallVector<Value *, 2> NewLoads;
2689 for (unsigned Part = 0; Part < UF; Part++) {
2690 auto *NewLoad = Builder.CreateAlignedLoad(
2691 NewPtrs[Part], Group->getAlignment(), "wide.vec");
2692 addMetadata(NewLoad, Instr);
2693 NewLoads.push_back(NewLoad);
2694 }
2695
2696 // For each member in the group, shuffle out the appropriate data from the
2697 // wide loads.
2698 for (unsigned I = 0; I < InterleaveFactor; ++I) {
2699 Instruction *Member = Group->getMember(I);
2700
2701 // Skip the gaps in the group.
2702 if (!Member)
2703 continue;
2704
2705 VectorParts Entry(UF);
2706 Constant *StrideMask = getStridedMask(Builder, I, InterleaveFactor, VF);
2707 for (unsigned Part = 0; Part < UF; Part++) {
2708 Value *StridedVec = Builder.CreateShuffleVector(
2709 NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2710
2711 // If this member has a different type, cast the result to its type.
2712 if (Member->getType() != ScalarTy) {
2713 VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2714 StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy);
2715 }
2716
2717 Entry[Part] =
2718 Group->isReverse() ? reverseVector(StridedVec) : StridedVec;
2719 }
2720 VectorLoopValueMap.initVector(Member, Entry);
2721 }
2722 return;
2723 }
2724
2725 // The subvector type for the current instruction.
2726 VectorType *SubVT = VectorType::get(ScalarTy, VF);
2727
2728 // Vectorize the interleaved store group.
2729 for (unsigned Part = 0; Part < UF; Part++) {
2730 // Collect the stored vector from each member.
2731 SmallVector<Value *, 4> StoredVecs;
2732 for (unsigned i = 0; i < InterleaveFactor; i++) {
2733 // An interleaved store group doesn't allow gaps, so each index has a member.
2734 Instruction *Member = Group->getMember(i);
2735 assert(Member && "Failed to get a member from an interleaved store group");
2736
2737 Value *StoredVec =
2738 getVectorValue(cast<StoreInst>(Member)->getValueOperand())[Part];
2739 if (Group->isReverse())
2740 StoredVec = reverseVector(StoredVec);
2741
2742 // If this member has a different type, cast it to the unified type.
2743 if (StoredVec->getType() != SubVT)
2744 StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT);
2745
2746 StoredVecs.push_back(StoredVec);
2747 }
2748
2749 // Concatenate all vectors into a wide vector.
2750 Value *WideVec = ConcatenateVectors(Builder, StoredVecs);
2751
2752 // Interleave the elements in the wide vector.
2753 Constant *IMask = getInterleavedMask(Builder, VF, InterleaveFactor);
2754 Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
2755 "interleaved.vec");
2756
2757 Instruction *NewStoreInstr =
2758 Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
2759 addMetadata(NewStoreInstr, Instr);
2760 }
2761 }
2762
2763 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
2764 // Attempt to issue a wide load.
2765 LoadInst *LI = dyn_cast<LoadInst>(Instr);
2766 StoreInst *SI = dyn_cast<StoreInst>(Instr);
2767
2768 assert((LI || SI) && "Invalid Load/Store instruction");
2769
2770 // Try to vectorize the interleave group if this access is interleaved.
2771 if (Legal->isAccessInterleaved(Instr))
2772 return vectorizeInterleaveGroup(Instr);
2773
2774 Type *ScalarDataTy = LI ? LI->getType() : SI->getValueOperand()->getType();
2775 Type *DataTy = VectorType::get(ScalarDataTy, VF);
2776 Value *Ptr = getPointerOperand(Instr);
2777 unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment();
2778 // An alignment of 0 means target ABI alignment. We need to use the scalar's
  // target ABI alignment in such a case.
  const DataLayout &DL = Instr->getModule()->getDataLayout();
  if (!Alignment)
    Alignment = DL.getABITypeAlignment(ScalarDataTy);
  unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();

  // Scalarize the memory instruction if necessary.
  if (Legal->memoryInstructionMustBeScalarized(Instr, VF))
    return scalarizeInstruction(Instr, Legal->isScalarWithPredication(Instr));

  // Determine if the pointer operand of the access is either consecutive or
  // reverse consecutive.
  int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
  bool Reverse = ConsecutiveStride < 0;

  // Determine if either a gather or scatter operation is legal.
  bool CreateGatherScatter =
      !ConsecutiveStride && Legal->isLegalGatherOrScatter(Instr);

  VectorParts VectorGep;

  // Handle consecutive loads/stores.
  GetElementPtrInst *Gep = getGEPInstruction(Ptr);
  if (ConsecutiveStride) {
    if (Gep) {
      unsigned NumOperands = Gep->getNumOperands();
#ifndef NDEBUG
      // The original GEP that was identified as a consecutive memory access
      // should have only one loop-variant operand.
      unsigned NumOfLoopVariantOps = 0;
      for (unsigned i = 0; i < NumOperands; ++i)
        if (!PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)),
                                          OrigLoop))
          NumOfLoopVariantOps++;
      assert(NumOfLoopVariantOps == 1 &&
             "Consecutive GEP should have only one loop-variant operand");
#endif
      GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
      Gep2->setName("gep.indvar");

      // A new GEP is created for a 0-lane value of the first unroll iteration.
      // The GEPs for the rest of the unroll iterations are computed below as an
      // offset from this GEP.
      for (unsigned i = 0; i < NumOperands; ++i)
        // We can apply getScalarValue() to all GEP indices. It returns the
        // original value for a loop-invariant operand and the 0-lane value for
        // a consecutive operand.
        Gep2->setOperand(i, getScalarValue(Gep->getOperand(i),
                                           0, /* First unroll iteration */
                                           0 /* 0-lane of the vector */ ));
      setDebugLocFromInst(Builder, Gep);
      Ptr = Builder.Insert(Gep2);

    } else { // No GEP
      setDebugLocFromInst(Builder, Ptr);
      Ptr = getScalarValue(Ptr, 0, 0);
    }
  } else {
    // At this point we should have a vector version of the GEP for a gather
    // or scatter.
    assert(CreateGatherScatter && "The instruction should be scalarized");
    if (Gep) {
      // Vectorize the GEP across the UF parts. We want to get a vector value
      // for the base and for each index that's defined inside the loop, even
      // if it is loop-invariant but wasn't hoisted out. Otherwise we want to
      // keep them scalar.
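      // For example (illustrative, VF = 4), a GEP whose only loop-varying
      // operand is the index becomes a vector GEP producing a vector of
      // pointers, e.g.
      //   %VectorGep = getelementptr double, double* %base, <4 x i64> %idx
      // yielding a <4 x double*> that feeds the masked gather or scatter.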
2844 SmallVector<VectorParts, 4> OpsV; 2845 for (Value *Op : Gep->operands()) { 2846 Instruction *SrcInst = dyn_cast<Instruction>(Op); 2847 if (SrcInst && OrigLoop->contains(SrcInst)) 2848 OpsV.push_back(getVectorValue(Op)); 2849 else 2850 OpsV.push_back(VectorParts(UF, Op)); 2851 } 2852 for (unsigned Part = 0; Part < UF; ++Part) { 2853 SmallVector<Value *, 4> Ops; 2854 Value *GEPBasePtr = OpsV[0][Part]; 2855 for (unsigned i = 1; i < Gep->getNumOperands(); i++) 2856 Ops.push_back(OpsV[i][Part]); 2857 Value *NewGep = Builder.CreateGEP(GEPBasePtr, Ops, "VectorGep"); 2858 cast<GetElementPtrInst>(NewGep)->setIsInBounds(Gep->isInBounds()); 2859 assert(NewGep->getType()->isVectorTy() && "Expected vector GEP"); 2860 2861 NewGep = 2862 Builder.CreateBitCast(NewGep, VectorType::get(Ptr->getType(), VF)); 2863 VectorGep.push_back(NewGep); 2864 } 2865 } else 2866 VectorGep = getVectorValue(Ptr); 2867 } 2868 2869 VectorParts Mask = createBlockInMask(Instr->getParent()); 2870 // Handle Stores: 2871 if (SI) { 2872 assert(!Legal->isUniform(SI->getPointerOperand()) && 2873 "We do not allow storing to uniform addresses"); 2874 setDebugLocFromInst(Builder, SI); 2875 // We don't want to update the value in the map as it might be used in 2876 // another expression. So don't use a reference type for "StoredVal". 2877 VectorParts StoredVal = getVectorValue(SI->getValueOperand()); 2878 2879 for (unsigned Part = 0; Part < UF; ++Part) { 2880 Instruction *NewSI = nullptr; 2881 if (CreateGatherScatter) { 2882 Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr; 2883 NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part], 2884 Alignment, MaskPart); 2885 } else { 2886 // Calculate the pointer for the specific unroll-part. 2887 Value *PartPtr = 2888 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 2889 2890 if (Reverse) { 2891 // If we store to reverse consecutive memory locations, then we need 2892 // to reverse the order of elements in the stored value. 2893 StoredVal[Part] = reverseVector(StoredVal[Part]); 2894 // If the address is consecutive but reversed, then the 2895 // wide store needs to start at the last vector element. 2896 PartPtr = 2897 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 2898 PartPtr = 2899 Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 2900 Mask[Part] = reverseVector(Mask[Part]); 2901 } 2902 2903 Value *VecPtr = 2904 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2905 2906 if (Legal->isMaskRequired(SI)) 2907 NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment, 2908 Mask[Part]); 2909 else 2910 NewSI = 2911 Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment); 2912 } 2913 addMetadata(NewSI, SI); 2914 } 2915 return; 2916 } 2917 2918 // Handle loads. 2919 assert(LI && "Must have a load instruction"); 2920 setDebugLocFromInst(Builder, LI); 2921 VectorParts Entry(UF); 2922 for (unsigned Part = 0; Part < UF; ++Part) { 2923 Instruction *NewLI; 2924 if (CreateGatherScatter) { 2925 Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr; 2926 NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart, 2927 0, "wide.masked.gather"); 2928 Entry[Part] = NewLI; 2929 } else { 2930 // Calculate the pointer for the specific unroll-part. 2931 Value *PartPtr = 2932 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 2933 2934 if (Reverse) { 2935 // If the address is consecutive but reversed, then the 2936 // wide load needs to start at the last vector element. 
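        // For example (illustrative, VF = 4), the two GEPs below compute the
        // address Ptr - Part * 4 - 3, i.e. each part's wide load spans the
        // four elements up to and including Ptr - Part * 4, and the loaded
        // vector is reversed afterwards to restore iteration order.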
        PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
        PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
        Mask[Part] = reverseVector(Mask[Part]);
      }

      Value *VecPtr =
          Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
      if (Legal->isMaskRequired(LI))
        NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
                                         UndefValue::get(DataTy),
                                         "wide.masked.load");
      else
        NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
      Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI;
    }
    addMetadata(NewLI, LI);
  }
  VectorLoopValueMap.initVector(Instr, Entry);
}

void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
                                               bool IfPredicateInstr) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  DEBUG(dbgs() << "LV: Scalarizing"
               << (IfPredicateInstr ? " and predicating:" : ":") << *Instr
               << '\n');
  // Holds vector parameters or scalars, in case of uniform vals.
  SmallVector<VectorParts, 4> Params;

  setDebugLocFromInst(Builder, Instr);

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  // Initialize a new scalar map entry.
  ScalarParts Entry(UF);

  VectorParts Cond;
  if (IfPredicateInstr)
    Cond = createBlockInMask(Instr->getParent());

  // Determine the number of scalars we need to generate for each unroll
  // iteration. If the instruction is uniform, we only need to generate the
  // first lane. Otherwise, we generate all VF values.
  unsigned Lanes = Legal->isUniformAfterVectorization(Instr) ? 1 : VF;

  // For each vector unroll 'part':
  for (unsigned Part = 0; Part < UF; ++Part) {
    Entry[Part].resize(VF);
    // For each scalar that we create:
    for (unsigned Lane = 0; Lane < Lanes; ++Lane) {

      // Start if-block.
      Value *Cmp = nullptr;
      if (IfPredicateInstr) {
        Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Lane));
        Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp,
                                 ConstantInt::get(Cmp->getType(), 1));
      }

      Instruction *Cloned = Instr->clone();
      if (!IsVoidRetTy)
        Cloned->setName(Instr->getName() + ".cloned");

      // Replace the operands of the cloned instructions with their scalar
      // equivalents in the new loop.
      for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
        auto *NewOp = getScalarValue(Instr->getOperand(op), Part, Lane);
        Cloned->setOperand(op, NewOp);
      }
      addNewMetadata(Cloned, Instr);

      // Place the cloned scalar in the new loop.
      Builder.Insert(Cloned);

      // Add the cloned scalar to the scalar map entry.
      Entry[Part][Lane] = Cloned;

      // If we just cloned a new assumption, add it to the assumption cache.
      if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
        if (II->getIntrinsicID() == Intrinsic::assume)
          AC->registerAssumption(II);

      // End if-block.
3021 if (IfPredicateInstr) 3022 PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp)); 3023 } 3024 } 3025 VectorLoopValueMap.initScalar(Instr, Entry); 3026 } 3027 3028 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 3029 Value *End, Value *Step, 3030 Instruction *DL) { 3031 BasicBlock *Header = L->getHeader(); 3032 BasicBlock *Latch = L->getLoopLatch(); 3033 // As we're just creating this loop, it's possible no latch exists 3034 // yet. If so, use the header as this will be a single block loop. 3035 if (!Latch) 3036 Latch = Header; 3037 3038 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 3039 setDebugLocFromInst(Builder, getDebugLocFromInstOrOperands(OldInduction)); 3040 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 3041 3042 Builder.SetInsertPoint(Latch->getTerminator()); 3043 3044 // Create i+1 and fill the PHINode. 3045 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 3046 Induction->addIncoming(Start, L->getLoopPreheader()); 3047 Induction->addIncoming(Next, Latch); 3048 // Create the compare. 3049 Value *ICmp = Builder.CreateICmpEQ(Next, End); 3050 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 3051 3052 // Now we have two terminators. Remove the old one from the block. 3053 Latch->getTerminator()->eraseFromParent(); 3054 3055 return Induction; 3056 } 3057 3058 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 3059 if (TripCount) 3060 return TripCount; 3061 3062 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3063 // Find the loop boundaries. 3064 ScalarEvolution *SE = PSE.getSE(); 3065 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 3066 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 3067 "Invalid loop count"); 3068 3069 Type *IdxTy = Legal->getWidestInductionType(); 3070 3071 // The exit count might have the type of i64 while the phi is i32. This can 3072 // happen if we have an induction variable that is sign extended before the 3073 // compare. The only way that we get a backedge taken count is that the 3074 // induction variable was signed and as such will not overflow. In such a case 3075 // truncation is legal. 3076 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 3077 IdxTy->getPrimitiveSizeInBits()) 3078 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3079 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3080 3081 // Get the total trip count from the count by adding 1. 3082 const SCEV *ExitCount = SE->getAddExpr( 3083 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3084 3085 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3086 3087 // Expand the trip count and place the new instructions in the preheader. 3088 // Notice that the pre-header does not change, only the loop body. 3089 SCEVExpander Exp(*SE, DL, "induction"); 3090 3091 // Count holds the overall loop count (N). 
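  // For example (illustrative), for a loop 'for (i = 0; i < n; ++i)' the
  // backedge-taken count is n - 1, so the trip count expanded below is n
  // (assuming the loop body executes at least once).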
3092 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3093 L->getLoopPreheader()->getTerminator()); 3094 3095 if (TripCount->getType()->isPointerTy()) 3096 TripCount = 3097 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3098 L->getLoopPreheader()->getTerminator()); 3099 3100 return TripCount; 3101 } 3102 3103 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3104 if (VectorTripCount) 3105 return VectorTripCount; 3106 3107 Value *TC = getOrCreateTripCount(L); 3108 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3109 3110 // Now we need to generate the expression for the part of the loop that the 3111 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3112 // iterations are not required for correctness, or N - Step, otherwise. Step 3113 // is equal to the vectorization factor (number of SIMD elements) times the 3114 // unroll factor (number of SIMD instructions). 3115 Constant *Step = ConstantInt::get(TC->getType(), VF * UF); 3116 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3117 3118 // If there is a non-reversed interleaved group that may speculatively access 3119 // memory out-of-bounds, we need to ensure that there will be at least one 3120 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 3121 // the trip count, we set the remainder to be equal to the step. If the step 3122 // does not evenly divide the trip count, no adjustment is necessary since 3123 // there will already be scalar iterations. Note that the minimum iterations 3124 // check ensures that N >= Step. 3125 if (VF > 1 && Legal->requiresScalarEpilogue()) { 3126 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3127 R = Builder.CreateSelect(IsZero, Step, R); 3128 } 3129 3130 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3131 3132 return VectorTripCount; 3133 } 3134 3135 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3136 BasicBlock *Bypass) { 3137 Value *Count = getOrCreateTripCount(L); 3138 BasicBlock *BB = L->getLoopPreheader(); 3139 IRBuilder<> Builder(BB->getTerminator()); 3140 3141 // Generate code to check that the loop's trip count that we computed by 3142 // adding one to the backedge-taken count will not overflow. 3143 Value *CheckMinIters = Builder.CreateICmpULT( 3144 Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check"); 3145 3146 BasicBlock *NewBB = 3147 BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked"); 3148 // Update dominator tree immediately if the generated block is a 3149 // LoopBypassBlock because SCEV expansions to generate loop bypass 3150 // checks may query it before the current function is finished. 3151 DT->addNewBlock(NewBB, BB); 3152 if (L->getParentLoop()) 3153 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 3154 ReplaceInstWithInst(BB->getTerminator(), 3155 BranchInst::Create(Bypass, NewBB, CheckMinIters)); 3156 LoopBypassBlocks.push_back(BB); 3157 } 3158 3159 void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L, 3160 BasicBlock *Bypass) { 3161 Value *TC = getOrCreateVectorTripCount(L); 3162 BasicBlock *BB = L->getLoopPreheader(); 3163 IRBuilder<> Builder(BB->getTerminator()); 3164 3165 // Now, compare the new count to zero. If it is zero skip the vector loop and 3166 // jump to the scalar loop. 
  Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
                                    "cmp.zero");

  BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, Cmp));
  LoopBypassBlocks.push_back(BB);
}

void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *BB = L->getLoopPreheader();

  // Generate the code to check the SCEV assumptions that we made.
  // We want the new basic block to start at the first instruction in a
  // sequence of instructions that form a check.
  SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
                   "scev.check");
  Value *SCEVCheck =
      Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());

  if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
    if (C->isZero())
      return;

  // Create a new block containing the SCEV check.
  BB->setName("vector.scevcheck");
  auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, SCEVCheck));
  LoopBypassBlocks.push_back(BB);
  AddedSafetyChecks = true;
}

void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *BB = L->getLoopPreheader();

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  Instruction *FirstCheckInst;
  Instruction *MemRuntimeCheck;
  std::tie(FirstCheckInst, MemRuntimeCheck) =
      Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
  if (!MemRuntimeCheck)
    return;

  // Create a new block containing the memory check.
  BB->setName("vector.memcheck");
  auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
  LoopBypassBlocks.push_back(BB);
  AddedSafetyChecks = true;

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
  LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
                                           PSE.getSE());
  LVer->prepareNoAliasMetadata();
}

void InnerLoopVectorizer::createEmptyLoop() {
  /*
   In this function we generate a new loop. The new loop will contain
   the vectorized instructions while the old loop will continue to run the
   scalar remainder.

       [ ] <-- loop iteration number check.
    /   |
   /    v
  |    [ ] <-- vector loop bypass (may consist of multiple blocks).
  |  /  |
  | /   v
  ||   [ ]     <-- vector pre header.
  |/    |
  |     v
  |    [  ] \
  |    [  ]_|   <-- vector loop.
  |     |
  |     v
  |   -[ ]   <--- middle-block.
  |  /  |
  | /   v
  -|- >[ ]     <--- new preheader.
   |    |
   |    v
   |   [ ] \
   |   [ ]_|   <-- old scalar loop to handle remainder.
    \   |
     \  v
      >[ ]     <-- exit block.
   ...
   */

  BasicBlock *OldBasicBlock = OrigLoop->getHeader();
  BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
  BasicBlock *ExitBlock = OrigLoop->getExitBlock();
  assert(VectorPH && "Invalid loop structure");
  assert(ExitBlock && "Must have an exit block");

  // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators that often have multiple pointer
  // induction variables. In the code below we also support a case where we
  // don't have a single induction variable.
  //
  // We try hard to obtain an induction variable from the original loop.
  // However, if we don't find one that:
  //   - is an integer
  //   - counts from zero, stepping by one
  //   - is the size of the widest induction variable type
  // then we create a new one.
  OldInduction = Legal->getInduction();
  Type *IdxTy = Legal->getWidestInductionType();

  // Split the single-block loop into the two-loop structure described above.
  BasicBlock *VecBody =
      VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
  BasicBlock *MiddleBlock =
      VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
  BasicBlock *ScalarPH =
      MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");

  // Create and register the new vector loop.
  Loop *Lp = new Loop();
  Loop *ParentLoop = OrigLoop->getParentLoop();

  // Insert the new loop into the loop nest and register the new basic blocks
  // before calling any utilities such as SCEV that require valid LoopInfo.
  if (ParentLoop) {
    ParentLoop->addChildLoop(Lp);
    ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
    ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
  } else {
    LI->addTopLevelLoop(Lp);
  }
  Lp->addBasicBlockToLoop(VecBody, *LI);

  // Find the loop boundaries.
  Value *Count = getOrCreateTripCount(Lp);

  Value *StartIdx = ConstantInt::get(IdxTy, 0);

  // We need to test whether the backedge-taken count is uint##_max. Adding one
  // to it will cause overflow and an incorrect loop trip count in the vector
  // body. In case of overflow we want to directly jump to the scalar remainder
  // loop.
  emitMinimumIterationCountCheck(Lp, ScalarPH);
  // Now, compare the new count to zero. If it is zero skip the vector loop and
  // jump to the scalar loop.
  emitVectorLoopEnteredCheck(Lp, ScalarPH);
  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
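  // For example (illustrative), a pointer stride that SCEV analysis could
  // only assume to be 1 during legality checking is verified here by a
  // runtime check of that assumption.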
  emitSCEVChecks(Lp, ScalarPH);

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  emitMemRuntimeChecks(Lp, ScalarPH);

  // Generate the induction variable.
  // The loop step is equal to the vectorization factor (num of SIMD elements)
  // times the unroll factor (num of SIMD instructions).
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Constant *Step = ConstantInt::get(IdxTy, VF * UF);
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // We are going to resume the execution of the scalar loop.
  // Go over all of the induction variables that we found and fix the
  // PHIs that are left in the scalar version of the loop.
  // The starting values of PHI nodes depend on the counter of the last
  // iteration in the vectorized loop.
  // If we come from a bypass edge then we need to start from the original
  // start value.

  // This variable saves the new starting index for the scalar loop. It is used
  // to test if there are any tail iterations left once the vector loop has
  // completed.
  LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
  for (auto &InductionEntry : *List) {
    PHINode *OrigPhi = InductionEntry.first;
    InductionDescriptor II = InductionEntry.second;

    // Create phi nodes to merge from the backedge-taken check block.
    PHINode *BCResumeVal = PHINode::Create(
        OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
    Value *EndValue;
    if (OrigPhi == OldInduction) {
      // We know what the end value is.
      EndValue = CountRoundDown;
    } else {
      IRBuilder<> B(LoopBypassBlocks.back()->getTerminator());
      Type *StepType = II.getStep()->getType();
      Instruction::CastOps CastOp =
          CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
      Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
      const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
      EndValue = II.transform(B, CRD, PSE.getSE(), DL);
      EndValue->setName("ind.end");
    }

    // The new PHI merges the original incoming value, in case of a bypass,
    // or the value at the end of the vectorized loop.
    BCResumeVal->addIncoming(EndValue, MiddleBlock);

    // Fix up external users of the induction variable.
    fixupIVUsers(OrigPhi, II, CountRoundDown, EndValue, MiddleBlock);

    // Fix the scalar body counter (PHI node).
    unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);

    // The old induction's phi node in the scalar body needs the truncated
    // value.
    for (BasicBlock *BB : LoopBypassBlocks)
      BCResumeVal->addIncoming(II.getStartValue(), BB);
    OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
  }

  // Add a check in the middle block to see if we have completed
  // all of the iterations in the first vector loop.
  // If (N - N%VF) == N, then we *don't* need to run the remainder.
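  // For example (illustrative), with N = 20 and VF * UF = 8, CountRoundDown
  // is 16, so the scalar loop handles the remaining 4 iterations; with N = 16
  // the remainder is skipped entirely (unless a scalar epilogue is required,
  // in which case CountRoundDown would have been reduced to 8 above).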
  Value *CmpN =
      CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
                      CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
  ReplaceInstWithInst(MiddleBlock->getTerminator(),
                      BranchInst::Create(ExitBlock, ScalarPH, CmpN));

  // Get ready to start creating new instructions into the vectorized body.
  Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());

  // Save the state.
  LoopVectorPreHeader = Lp->getLoopPreheader();
  LoopScalarPreHeader = ScalarPH;
  LoopMiddleBlock = MiddleBlock;
  LoopExitBlock = ExitBlock;
  LoopVectorBody = VecBody;
  LoopScalarBody = OldBasicBlock;

  // Keep all loop hints from the original loop on the vector loop (we'll
  // replace the vectorizer-specific hints below).
  if (MDNode *LID = OrigLoop->getLoopID())
    Lp->setLoopID(LID);

  LoopVectorizeHints Hints(Lp, true, *ORE);
  Hints.setAlreadyVectorized();
}

// Fix up external users of the induction variable. At this point, we are
// in LCSSA form, with all external PHIs that use the IV having one input value,
// coming from the remainder loop. We need those PHIs to also have a correct
// value for the IV when arriving directly from the middle block.
void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
                                       const InductionDescriptor &II,
                                       Value *CountRoundDown, Value *EndValue,
                                       BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the penultimate
  // value (the value that feeds into the phi from the loop latch).
  // We allow both, but they, obviously, have different values.

  assert(OrigLoop->getExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());
      Value *CountMinusOne = B.CreateSub(
          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
      Value *CMO = B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType(),
                                       "cast.cmo");
      Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2".
So, verify that we 3490 // don't already have an incoming value for the middle block. 3491 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3492 PHI->addIncoming(I.second, MiddleBlock); 3493 } 3494 } 3495 3496 namespace { 3497 struct CSEDenseMapInfo { 3498 static bool canHandle(Instruction *I) { 3499 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3500 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3501 } 3502 static inline Instruction *getEmptyKey() { 3503 return DenseMapInfo<Instruction *>::getEmptyKey(); 3504 } 3505 static inline Instruction *getTombstoneKey() { 3506 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3507 } 3508 static unsigned getHashValue(Instruction *I) { 3509 assert(canHandle(I) && "Unknown instruction!"); 3510 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3511 I->value_op_end())); 3512 } 3513 static bool isEqual(Instruction *LHS, Instruction *RHS) { 3514 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3515 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3516 return LHS == RHS; 3517 return LHS->isIdenticalTo(RHS); 3518 } 3519 }; 3520 } 3521 3522 ///\brief Perform cse of induction variable instructions. 3523 static void cse(BasicBlock *BB) { 3524 // Perform simple cse. 3525 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3526 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3527 Instruction *In = &*I++; 3528 3529 if (!CSEDenseMapInfo::canHandle(In)) 3530 continue; 3531 3532 // Check if we can replace this instruction with any of the 3533 // visited instructions. 3534 if (Instruction *V = CSEMap.lookup(In)) { 3535 In->replaceAllUsesWith(V); 3536 In->eraseFromParent(); 3537 continue; 3538 } 3539 3540 CSEMap[In] = In; 3541 } 3542 } 3543 3544 /// \brief Adds a 'fast' flag to floating point operations. 3545 static Value *addFastMathFlag(Value *V) { 3546 if (isa<FPMathOperator>(V)) { 3547 FastMathFlags Flags; 3548 Flags.setUnsafeAlgebra(); 3549 cast<Instruction>(V)->setFastMathFlags(Flags); 3550 } 3551 return V; 3552 } 3553 3554 /// \brief Estimate the overhead of scalarizing a value based on its type. 3555 /// Insert and Extract are set if the result needs to be inserted and/or 3556 /// extracted from vectors. 3557 static unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract, 3558 const TargetTransformInfo &TTI) { 3559 if (Ty->isVoidTy()) 3560 return 0; 3561 3562 assert(Ty->isVectorTy() && "Can only scalarize vectors"); 3563 unsigned Cost = 0; 3564 3565 for (unsigned I = 0, E = Ty->getVectorNumElements(); I < E; ++I) { 3566 if (Extract) 3567 Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, Ty, I); 3568 if (Insert) 3569 Cost += TTI.getVectorInstrCost(Instruction::InsertElement, Ty, I); 3570 } 3571 3572 return Cost; 3573 } 3574 3575 /// \brief Estimate the overhead of scalarizing an Instruction based on the 3576 /// types of its operands and return value. 3577 static unsigned getScalarizationOverhead(SmallVectorImpl<Type *> &OpTys, 3578 Type *RetTy, 3579 const TargetTransformInfo &TTI) { 3580 unsigned ScalarizationCost = 3581 getScalarizationOverhead(RetTy, true, false, TTI); 3582 3583 for (Type *Ty : OpTys) 3584 ScalarizationCost += getScalarizationOverhead(Ty, false, true, TTI); 3585 3586 return ScalarizationCost; 3587 } 3588 3589 /// \brief Estimate the overhead of scalarizing an instruction. This is a 3590 /// convenience wrapper for the type-based getScalarizationOverhead API. 
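/// For example (illustrative), for an 'add <4 x i32>' this accounts for
/// extracting the four lanes of each of the two operands and inserting the
/// four scalar results back into a vector; the actual per-element costs come
/// from TTI.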
static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
                                         const TargetTransformInfo &TTI) {
  if (VF == 1)
    return 0;

  Type *RetTy = ToVectorTy(I->getType(), VF);

  SmallVector<Type *, 4> OpTys;
  unsigned OperandsNum = I->getNumOperands();
  for (unsigned OpInd = 0; OpInd < OperandsNum; ++OpInd)
    OpTys.push_back(ToVectorTy(I->getOperand(OpInd)->getType(), VF));

  return getScalarizationOverhead(OpTys, RetTy, TTI);
}

// Estimate cost of a call instruction CI if it were vectorized with factor VF.
// Return the cost of the instruction, including scalarization overhead if it's
// needed. The flag NeedToScalarize shows if the call needs to be scalarized,
// i.e. either a vector version isn't available or it is too expensive.
static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
                                  const TargetTransformInfo &TTI,
                                  const TargetLibraryInfo *TLI,
                                  bool &NeedToScalarize) {
  Function *F = CI->getCalledFunction();
  StringRef FnName = CI->getCalledFunction()->getName();
  Type *ScalarRetTy = CI->getType();
  SmallVector<Type *, 4> Tys, ScalarTys;
  for (auto &ArgOp : CI->arg_operands())
    ScalarTys.push_back(ArgOp->getType());

  // Estimate cost of scalarized vector call. The source operands are assumed
  // to be vectors, so we need to extract individual elements from there,
  // execute VF scalar calls, and then gather the result into the vector return
  // value.
  unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
  if (VF == 1)
    return ScalarCallCost;

  // Compute corresponding vector type for return value and arguments.
  Type *RetTy = ToVectorTy(ScalarRetTy, VF);
  for (Type *ScalarTy : ScalarTys)
    Tys.push_back(ToVectorTy(ScalarTy, VF));

  // Compute costs of unpacking argument values for the scalar calls and
  // packing the return values to a vector.
  unsigned ScalarizationCost = getScalarizationOverhead(Tys, RetTy, TTI);

  unsigned Cost = ScalarCallCost * VF + ScalarizationCost;

  // If we can't emit a vector call for this function, then the currently found
  // cost is the cost we need to return.
  NeedToScalarize = true;
  if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
    return Cost;

  // If the corresponding vector cost is cheaper, return its cost.
  unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
  if (VectorCallCost < Cost) {
    NeedToScalarize = false;
    return VectorCallCost;
  }
  return Cost;
}

// Estimate cost of an intrinsic call instruction CI if it were vectorized with
// factor VF. Return the cost of the instruction, including scalarization
// overhead if it's needed.
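// For example (illustrative), a call to llvm.sqrt.f32 at VF = 4 is costed as
// the corresponding <4 x float> intrinsic via TTI; the actual number returned
// is target-dependent.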
3658 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3659 const TargetTransformInfo &TTI, 3660 const TargetLibraryInfo *TLI) { 3661 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3662 assert(ID && "Expected intrinsic call!"); 3663 3664 Type *RetTy = ToVectorTy(CI->getType(), VF); 3665 SmallVector<Type *, 4> Tys; 3666 for (Value *ArgOperand : CI->arg_operands()) 3667 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 3668 3669 FastMathFlags FMF; 3670 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3671 FMF = FPMO->getFastMathFlags(); 3672 3673 return TTI.getIntrinsicInstrCost(ID, RetTy, Tys, FMF); 3674 } 3675 3676 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3677 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3678 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3679 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3680 } 3681 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3682 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3683 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3684 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3685 } 3686 3687 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3688 // For every instruction `I` in MinBWs, truncate the operands, create a 3689 // truncated version of `I` and reextend its result. InstCombine runs 3690 // later and will remove any ext/trunc pairs. 3691 // 3692 SmallPtrSet<Value *, 4> Erased; 3693 for (const auto &KV : Cost->getMinimalBitwidths()) { 3694 VectorParts &Parts = VectorLoopValueMap.getVector(KV.first); 3695 for (Value *&I : Parts) { 3696 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3697 continue; 3698 Type *OriginalTy = I->getType(); 3699 Type *ScalarTruncatedTy = 3700 IntegerType::get(OriginalTy->getContext(), KV.second); 3701 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3702 OriginalTy->getVectorNumElements()); 3703 if (TruncatedTy == OriginalTy) 3704 continue; 3705 3706 IRBuilder<> B(cast<Instruction>(I)); 3707 auto ShrinkOperand = [&](Value *V) -> Value * { 3708 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3709 if (ZI->getSrcTy() == TruncatedTy) 3710 return ZI->getOperand(0); 3711 return B.CreateZExtOrTrunc(V, TruncatedTy); 3712 }; 3713 3714 // The actual instruction modification depends on the instruction type, 3715 // unfortunately. 
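      // For example (illustrative, assuming a minimal bitwidth of 8 and
      // VF = 4), an 'add <4 x i32>' is rewritten roughly as:
      //   %a8 = trunc <4 x i32> %a to <4 x i8>
      //   %b8 = trunc <4 x i32> %b to <4 x i8>
      //   %s8 = add <4 x i8> %a8, %b8
      //   %s  = zext <4 x i8> %s8 to <4 x i32>
      // and InstCombine later folds away redundant ext/trunc pairs.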
      Value *NewI = nullptr;
      if (auto *BO = dyn_cast<BinaryOperator>(I)) {
        NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
                             ShrinkOperand(BO->getOperand(1)));
        cast<BinaryOperator>(NewI)->copyIRFlags(I);
      } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
        NewI =
            B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
                         ShrinkOperand(CI->getOperand(1)));
      } else if (auto *SI = dyn_cast<SelectInst>(I)) {
        NewI = B.CreateSelect(SI->getCondition(),
                              ShrinkOperand(SI->getTrueValue()),
                              ShrinkOperand(SI->getFalseValue()));
      } else if (auto *CI = dyn_cast<CastInst>(I)) {
        switch (CI->getOpcode()) {
        default:
          llvm_unreachable("Unhandled cast!");
        case Instruction::Trunc:
          NewI = ShrinkOperand(CI->getOperand(0));
          break;
        case Instruction::SExt:
          NewI = B.CreateSExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        case Instruction::ZExt:
          NewI = B.CreateZExtOrTrunc(
              CI->getOperand(0),
              smallestIntegerVectorType(OriginalTy, TruncatedTy));
          break;
        }
      } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
        auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
        auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
        auto *O1 = B.CreateZExtOrTrunc(
            SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));

        NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
      } else if (isa<LoadInst>(I)) {
        // Don't do anything with the operands, just extend the result.
        continue;
      } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
        auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
        NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
      } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
        auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
        auto *O0 = B.CreateZExtOrTrunc(
            EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
        NewI = B.CreateExtractElement(O0, EE->getOperand(2));
      } else {
        llvm_unreachable("Unhandled instruction type!");
      }

      // Lastly, extend the result.
      NewI->takeName(cast<Instruction>(I));
      Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
      I->replaceAllUsesWith(Res);
      cast<Instruction>(I)->eraseFromParent();
      Erased.insert(I);
      I = Res;
    }
  }

  // We'll have created a bunch of ZExts that are now parentless. Clean up.
  for (const auto &KV : Cost->getMinimalBitwidths()) {
    VectorParts &Parts = VectorLoopValueMap.getVector(KV.first);
    for (Value *&I : Parts) {
      ZExtInst *Inst = dyn_cast<ZExtInst>(I);
      if (Inst && Inst->use_empty()) {
        Value *NewI = Inst->getOperand(0);
        Inst->eraseFromParent();
        I = NewI;
      }
    }
  }
}

void InnerLoopVectorizer::vectorizeLoop() {
  //===------------------------------------------------===//
  //
  // Notice: any optimization or new instruction that goes
  // into the code below should also be implemented in
  // the cost-model.
  //
  //===------------------------------------------------===//
  Constant *Zero = Builder.getInt32(0);

  // In order to support recurrences we need to be able to vectorize Phi nodes.
  // Phi nodes have cycles, so we need to vectorize them in two stages. First,
  // we create a new vector PHI node with no incoming edges. We use this value
  // when we vectorize all of the instructions that use the PHI. Next, after
  // all of the instructions in the block are complete we add the new incoming
  // edges to the PHI. At this point all of the instructions in the basic block
  // are vectorized, so we can use them to construct the PHI.
  PhiVector PHIsToFix;

  // Collect instructions from the original loop that will become trivially
  // dead in the vectorized loop. We don't need to vectorize these
  // instructions.
  collectTriviallyDeadInstructions();

  // Scan the loop in a topological order to ensure that defs are vectorized
  // before users.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  // Vectorize all of the blocks in the original loop.
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    vectorizeBlockInLoop(BB, &PHIsToFix);

  // Insert truncates and extends for any truncated instructions as hints to
  // InstCombine.
  if (VF > 1)
    truncateToMinimalBitwidths();

  // At this point every instruction in the original loop is widened to a
  // vector form. Now we need to fix the recurrences in PHIsToFix. These PHI
  // nodes are currently empty because we did not want to introduce cycles.
  // This is the second stage of vectorizing recurrences.
  for (PHINode *Phi : PHIsToFix) {
    assert(Phi && "Unable to recover vectorized PHI");

    // Handle first-order recurrences that need to be fixed.
    if (Legal->isFirstOrderRecurrence(Phi)) {
      fixFirstOrderRecurrence(Phi);
      continue;
    }

    // If the phi node is not a first-order recurrence, it must be a reduction.
    // Get its reduction variable descriptor.
    assert(Legal->isReductionVariable(Phi) &&
           "Unable to find the reduction variable");
    RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];

    RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
    TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
    Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
    RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
        RdxDesc.getMinMaxRecurrenceKind();
    setDebugLocFromInst(Builder, ReductionStartValue);

    // We need to generate a reduction vector from the incoming scalar.
    // To do so, we need to generate the 'identity' vector and override
    // one of the elements with the incoming scalar reduction. We need
    // to do it in the vector-loop preheader.
    Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());

    // This is the vector-clone of the value that leaves the loop.
    const VectorParts &VectorExit = getVectorValue(LoopExitInst);
    Type *VecTy = VectorExit[0]->getType();

    // Find the reduction identity variable. Zero for addition, or and xor;
    // one for multiplication; -1 for and.
    Value *Identity;
    Value *VectorStart;
    if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
        RK == RecurrenceDescriptor::RK_FloatMinMax) {
      // MinMax reductions have the start value as their identity.
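      // E.g. (illustrative) for an smax reduction that starts at %s, every
      // lane is seeded with %s: taking the maximum with %s leaves the final
      // reduced value unchanged.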
      if (VF == 1) {
        VectorStart = Identity = ReductionStartValue;
      } else {
        VectorStart = Identity =
            Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
      }
    } else {
      // Handle other reduction kinds:
      Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
          RK, VecTy->getScalarType());
      if (VF == 1) {
        Identity = Iden;
        // This vector is the Identity vector where the first element is the
        // incoming scalar reduction.
        VectorStart = ReductionStartValue;
      } else {
        Identity = ConstantVector::getSplat(VF, Iden);

        // This vector is the Identity vector where the first element is the
        // incoming scalar reduction.
        VectorStart =
            Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
      }
    }

    // Fix the vector-loop phi.

    // Reductions do not have to start at zero. They can start with
    // any loop-invariant values.
    const VectorParts &VecRdxPhi = getVectorValue(Phi);
    BasicBlock *Latch = OrigLoop->getLoopLatch();
    Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
    const VectorParts &Val = getVectorValue(LoopVal);
    for (unsigned part = 0; part < UF; ++part) {
      // Make sure to add the reduction start value only to the
      // first unroll part.
      Value *StartVal = (part == 0) ? VectorStart : Identity;
      cast<PHINode>(VecRdxPhi[part])
          ->addIncoming(StartVal, LoopVectorPreHeader);
      cast<PHINode>(VecRdxPhi[part])
          ->addIncoming(Val[part], LoopVectorBody);
    }

    // Before each round, move the insertion point right between
    // the PHIs and the values we are going to write.
    // This allows us to write both PHINodes and the extractelement
    // instructions.
    Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());

    VectorParts &RdxParts = VectorLoopValueMap.getVector(LoopExitInst);
    setDebugLocFromInst(Builder, LoopExitInst);

    // If the vector reduction can be performed in a smaller type, we truncate
    // then extend the loop exit value to enable InstCombine to evaluate the
    // entire expression in the smaller type.
    if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
      Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
      Builder.SetInsertPoint(LoopVectorBody->getTerminator());
      for (unsigned part = 0; part < UF; ++part) {
        Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
        Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
                                          : Builder.CreateZExt(Trunc, VecTy);
        for (Value::user_iterator UI = RdxParts[part]->user_begin();
             UI != RdxParts[part]->user_end();)
          if (*UI != Trunc) {
            (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd);
            RdxParts[part] = Extnd;
          } else {
            ++UI;
          }
      }
      Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
      for (unsigned part = 0; part < UF; ++part)
        RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy);
    }

    // Reduce all of the unrolled parts into a single vector.
    Value *ReducedPartRdx = RdxParts[0];
    unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
    setDebugLocFromInst(Builder, ReducedPartRdx);
    for (unsigned part = 1; part < UF; ++part) {
      if (Op != Instruction::ICmp && Op != Instruction::FCmp)
        // Floating point operations had to be 'fast' to enable the reduction.
3962 ReducedPartRdx = addFastMathFlag( 3963 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part], 3964 ReducedPartRdx, "bin.rdx")); 3965 else 3966 ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp( 3967 Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]); 3968 } 3969 3970 if (VF > 1) { 3971 // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles 3972 // and vector ops, reducing the set of values being computed by half each 3973 // round. 3974 assert(isPowerOf2_32(VF) && 3975 "Reduction emission only supported for pow2 vectors!"); 3976 Value *TmpVec = ReducedPartRdx; 3977 SmallVector<Constant *, 32> ShuffleMask(VF, nullptr); 3978 for (unsigned i = VF; i != 1; i >>= 1) { 3979 // Move the upper half of the vector to the lower half. 3980 for (unsigned j = 0; j != i / 2; ++j) 3981 ShuffleMask[j] = Builder.getInt32(i / 2 + j); 3982 3983 // Fill the rest of the mask with undef. 3984 std::fill(&ShuffleMask[i / 2], ShuffleMask.end(), 3985 UndefValue::get(Builder.getInt32Ty())); 3986 3987 Value *Shuf = Builder.CreateShuffleVector( 3988 TmpVec, UndefValue::get(TmpVec->getType()), 3989 ConstantVector::get(ShuffleMask), "rdx.shuf"); 3990 3991 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 3992 // Floating point operations had to be 'fast' to enable the reduction. 3993 TmpVec = addFastMathFlag(Builder.CreateBinOp( 3994 (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx")); 3995 else 3996 TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind, 3997 TmpVec, Shuf); 3998 } 3999 4000 // The result is in the first element of the vector. 4001 ReducedPartRdx = 4002 Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 4003 4004 // If the reduction can be performed in a smaller type, we need to extend 4005 // the reduction to the wider type before we branch to the original loop. 4006 if (Phi->getType() != RdxDesc.getRecurrenceType()) 4007 ReducedPartRdx = 4008 RdxDesc.isSigned() 4009 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType()) 4010 : Builder.CreateZExt(ReducedPartRdx, Phi->getType()); 4011 } 4012 4013 // Create a phi node that merges control-flow from the backedge-taken check 4014 // block and the middle block. 4015 PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx", 4016 LoopScalarPreHeader->getTerminator()); 4017 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4018 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4019 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4020 4021 // Now, we need to fix the users of the reduction variable 4022 // inside and outside of the scalar remainder loop. 4023 // We know that the loop is in LCSSA form. We need to update the 4024 // PHI nodes in the exit blocks. 4025 for (BasicBlock::iterator LEI = LoopExitBlock->begin(), 4026 LEE = LoopExitBlock->end(); 4027 LEI != LEE; ++LEI) { 4028 PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI); 4029 if (!LCSSAPhi) 4030 break; 4031 4032 // All PHINodes need to have a single entry edge, or two if 4033 // we already fixed them. 4034 assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); 4035 4036 // We found our reduction value exit-PHI. Update it with the 4037 // incoming bypass edge. 4038 if (LCSSAPhi->getIncomingValue(0) == LoopExitInst) { 4039 // Add an edge coming from the bypass. 4040 LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4041 break; 4042 } 4043 } // end of the LCSSA phi scan. 

    // Fix the scalar loop reduction variable with the incoming reduction sum
    // from the vector body and from the backedge value.
    int IncomingEdgeBlockIdx =
        Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
    assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
    // Pick the other block.
    int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
    Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
    Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
  } // end of for each Phi in PHIsToFix.

  fixLCSSAPHIs();

  // Make sure DomTree is updated.
  updateAnalysis();

  predicateInstructions();

  // Remove redundant induction instructions.
  cse(LoopVectorBody);
}

void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {

  // This is the second phase of vectorizing first-order recurrences. An
  // overview of the transformation is described below. Suppose we have the
  // following loop.
  //
  //   for (int i = 0; i < n; ++i)
  //     b[i] = a[i] - a[i - 1];
  //
  // There is a first-order recurrence on "a". For this loop, the shorthand
  // scalar IR looks like:
  //
  //   scalar.ph:
  //     s_init = a[-1]
  //     br scalar.body
  //
  //   scalar.body:
  //     i = phi [0, scalar.ph], [i+1, scalar.body]
  //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
  //     s2 = a[i]
  //     b[i] = s2 - s1
  //     br cond, scalar.body, ...
  //
  // In this example, s1 is a recurrence because its value depends on the
  // previous iteration. In the first phase of vectorization, we created a
  // temporary value for s1. We now complete the vectorization and produce the
  // shorthand vector IR shown below (for VF = 4, UF = 1).
  //
  //   vector.ph:
  //     v_init = vector(..., ..., ..., a[-1])
  //     br vector.body
  //
  //   vector.body
  //     i = phi [0, vector.ph], [i+4, vector.body]
  //     v1 = phi [v_init, vector.ph], [v2, vector.body]
  //     v2 = a[i, i+1, i+2, i+3];
  //     v3 = vector(v1(3), v2(0, 1, 2))
  //     b[i, i+1, i+2, i+3] = v2 - v3
  //     br cond, vector.body, middle.block
  //
  //   middle.block:
  //     x = v2(3)
  //     br scalar.ph
  //
  //   scalar.ph:
  //     s_init = phi [x, middle.block], [a[-1], otherwise]
  //     br scalar.body
  //
  // After execution of the vector loop completes, we extract the next value
  // of the recurrence (x) to use as the initial value in the scalar loop.

  // Get the original loop preheader and single loop latch.
  auto *Preheader = OrigLoop->getLoopPreheader();
  auto *Latch = OrigLoop->getLoopLatch();

  // Get the initial and previous values of the scalar recurrence.
  auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
  auto *Previous = Phi->getIncomingValueForBlock(Latch);

  // Create a vector from the initial value.
  auto *VectorInit = ScalarInit;
  if (VF > 1) {
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
    VectorInit = Builder.CreateInsertElement(
        UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
        Builder.getInt32(VF - 1), "vector.recur.init");
  }

  // We constructed a temporary phi node in the first phase of vectorization.
  // This phi node will eventually be deleted.
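  // (For reference: with VF = 4, the shuffle mask constructed further below is
  // <3, 4, 5, 6>, i.e. the last lane of the previous value followed by the
  // first three lanes of the current one, matching v3 in the example above.)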
  VectorParts &PhiParts = VectorLoopValueMap.getVector(Phi);
  Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));

  // Create a phi node for the new recurrence. The current value will either be
  // the initial value inserted into a vector or a loop-varying vector value.
  auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
  VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);

  // Get the vectorized previous value. We ensured the previous value was an
  // instruction when detecting the recurrence.
  auto &PreviousParts = getVectorValue(Previous);

  // Set the insertion point to be after this instruction. We ensured the
  // previous value dominated all uses of the phi when detecting the
  // recurrence.
  Builder.SetInsertPoint(
      &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1])));

  // We will construct a vector for the recurrence by combining the values for
  // the current and previous iterations. This is the required shuffle mask.
  SmallVector<Constant *, 8> ShuffleMask(VF);
  ShuffleMask[0] = Builder.getInt32(VF - 1);
  for (unsigned I = 1; I < VF; ++I)
    ShuffleMask[I] = Builder.getInt32(I + VF - 1);

  // The vector from which to take the initial value for the current iteration
  // (actual or unrolled). Initially, this is the vector phi node.
  Value *Incoming = VecPhi;

  // Shuffle the current and previous vector and update the vector parts.
  for (unsigned Part = 0; Part < UF; ++Part) {
    auto *Shuffle =
        VF > 1
            ? Builder.CreateShuffleVector(Incoming, PreviousParts[Part],
                                          ConstantVector::get(ShuffleMask))
            : Incoming;
    PhiParts[Part]->replaceAllUsesWith(Shuffle);
    cast<Instruction>(PhiParts[Part])->eraseFromParent();
    PhiParts[Part] = Shuffle;
    Incoming = PreviousParts[Part];
  }

  // Fix the latch value of the new recurrence in the vector loop.
  VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());

  // Extract the last vector element in the middle block. This will be the
  // initial value for the recurrence when jumping to the scalar loop.
  auto *Extract = Incoming;
  if (VF > 1) {
    Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
    Extract = Builder.CreateExtractElement(Extract, Builder.getInt32(VF - 1),
                                           "vector.recur.extract");
  }

  // Fix the initial value of the original recurrence in the scalar loop.
  Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
  auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
  for (auto *BB : predecessors(LoopScalarPreHeader)) {
    auto *Incoming = BB == LoopMiddleBlock ? Extract : ScalarInit;
    Start->addIncoming(Incoming, BB);
  }

  Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
  Phi->setName("scalar.recur");

  // Finally, fix users of the recurrence outside the loop. The users will need
  // either the last value of the scalar recurrence or the last value of the
  // vector recurrence we extracted in the middle block. Since the loop is in
  // LCSSA form, we just need to find the phi node for the original scalar
  // recurrence in the exit block, and then add an edge for the middle block.
4207 for (auto &I : *LoopExitBlock) {
4208 auto *LCSSAPhi = dyn_cast<PHINode>(&I);
4209 if (!LCSSAPhi)
4210 break;
4211 if (LCSSAPhi->getIncomingValue(0) == Phi) {
4212 LCSSAPhi->addIncoming(Extract, LoopMiddleBlock);
4213 break;
4214 }
4215 }
4216 }
4217
4218 void InnerLoopVectorizer::fixLCSSAPHIs() {
4219 for (Instruction &LEI : *LoopExitBlock) {
4220 auto *LCSSAPhi = dyn_cast<PHINode>(&LEI);
4221 if (!LCSSAPhi)
4222 break;
4223 if (LCSSAPhi->getNumIncomingValues() == 1)
4224 LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()),
4225 LoopMiddleBlock);
4226 }
4227 }
4228
4229 void InnerLoopVectorizer::collectTriviallyDeadInstructions() {
4230 BasicBlock *Latch = OrigLoop->getLoopLatch();
4231
4232 // We create new control-flow for the vectorized loop, so the original
4233 // condition will be dead after vectorization if it's only used by the
4234 // branch.
4235 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4236 if (Cmp && Cmp->hasOneUse())
4237 DeadInstructions.insert(Cmp);
4238
4239 // We create new "steps" for induction variable updates to which the original
4240 // induction variables map. An original update instruction will be dead if
4241 // all its users except the induction variable are dead.
4242 for (auto &Induction : *Legal->getInductionVars()) {
4243 PHINode *Ind = Induction.first;
4244 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4245 if (all_of(IndUpdate->users(), [&](User *U) -> bool {
4246 return U == Ind || DeadInstructions.count(cast<Instruction>(U));
4247 }))
4248 DeadInstructions.insert(IndUpdate);
4249 }
4250 }
4251
4252 void InnerLoopVectorizer::predicateInstructions() {
4253
4254 // For each instruction I marked for predication on value C, split I into its
4255 // own basic block to form an if-then construct over C.
4256 // Since I may be fed by extractelement and/or be feeding an insertelement
4257 // generated during scalarization, we try to move such instructions into the
4258 // predicated basic block as well. For the insertelement, this also means that
4259 // the PHI will be created for the resulting vector rather than for the
4260 // scalar instruction.
4261 // So for some predicated instruction, e.g. the conditional sdiv in:
4262 //
4263 // for.body:
4264 // ...
4265 // %add = add nsw i32 %mul, %0
4266 // %cmp5 = icmp sgt i32 %2, 7
4267 // br i1 %cmp5, label %if.then, label %if.end
4268 //
4269 // if.then:
4270 // %div = sdiv i32 %0, %1
4271 // br label %if.end
4272 //
4273 // if.end:
4274 // %x.0 = phi i32 [ %div, %if.then ], [ %add, %for.body ]
4275 //
4276 // the sdiv at this point is scalarized and if-converted using a select.
4277 // The inactive elements in the vector are not used, but the predicated
4278 // instruction is still executed for all vector elements, essentially:
4279 //
4280 // vector.body:
4281 // ...
4282 // %17 = add nsw <2 x i32> %16, %wide.load
4283 // %29 = extractelement <2 x i32> %wide.load, i32 0
4284 // %30 = extractelement <2 x i32> %wide.load51, i32 0
4285 // %31 = sdiv i32 %29, %30
4286 // %32 = insertelement <2 x i32> undef, i32 %31, i32 0
4287 // %35 = extractelement <2 x i32> %wide.load, i32 1
4288 // %36 = extractelement <2 x i32> %wide.load51, i32 1
4289 // %37 = sdiv i32 %35, %36
4290 // %38 = insertelement <2 x i32> %32, i32 %37, i32 1
4291 // %predphi = select <2 x i1> %26, <2 x i32> %38, <2 x i32> %17
4292 //
4293 // Predication will now re-introduce the original control flow to avoid false
4294 // side-effects by the sdiv instructions on the inactive elements, yielding
4295 // (after cleanup):
4296 //
4297 // vector.body:
4298 // ...
4299 // %5 = add nsw <2 x i32> %4, %wide.load
4300 // %8 = icmp sgt <2 x i32> %wide.load52, <i32 7, i32 7>
4301 // %9 = extractelement <2 x i1> %8, i32 0
4302 // br i1 %9, label %pred.sdiv.if, label %pred.sdiv.continue
4303 //
4304 // pred.sdiv.if:
4305 // %10 = extractelement <2 x i32> %wide.load, i32 0
4306 // %11 = extractelement <2 x i32> %wide.load51, i32 0
4307 // %12 = sdiv i32 %10, %11
4308 // %13 = insertelement <2 x i32> undef, i32 %12, i32 0
4309 // br label %pred.sdiv.continue
4310 //
4311 // pred.sdiv.continue:
4312 // %14 = phi <2 x i32> [ undef, %vector.body ], [ %13, %pred.sdiv.if ]
4313 // %15 = extractelement <2 x i1> %8, i32 1
4314 // br i1 %15, label %pred.sdiv.if54, label %pred.sdiv.continue55
4315 //
4316 // pred.sdiv.if54:
4317 // %16 = extractelement <2 x i32> %wide.load, i32 1
4318 // %17 = extractelement <2 x i32> %wide.load51, i32 1
4319 // %18 = sdiv i32 %16, %17
4320 // %19 = insertelement <2 x i32> %14, i32 %18, i32 1
4321 // br label %pred.sdiv.continue55
4322 //
4323 // pred.sdiv.continue55:
4324 // %20 = phi <2 x i32> [ %14, %pred.sdiv.continue ], [ %19, %pred.sdiv.if54 ]
4325 // %predphi = select <2 x i1> %8, <2 x i32> %20, <2 x i32> %5
4326
4327 for (auto KV : PredicatedInstructions) {
4328 BasicBlock::iterator I(KV.first);
4329 BasicBlock *Head = I->getParent();
4330 auto *BB = SplitBlock(Head, &*std::next(I), DT, LI);
4331 auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false,
4332 /*BranchWeights=*/nullptr, DT, LI);
4333 I->moveBefore(T);
4334 // Try to move any extractelement we may have created for the predicated
4335 // instruction into the Then block.
4336 for (Use &Op : I->operands()) {
4337 auto *OpInst = dyn_cast<ExtractElementInst>(&*Op);
4338 if (OpInst && OpInst->hasOneUse()) // TODO: more accurately - hasOneUser()
4339 OpInst->moveBefore(&*I);
4340 }
4341
4342 I->getParent()->setName(Twine("pred.") + I->getOpcodeName() + ".if");
4343 BB->setName(Twine("pred.") + I->getOpcodeName() + ".continue");
4344
4345 // If the instruction is non-void, create a Phi node at the reconvergence
4346 // point.
4346 if (!I->getType()->isVoidTy()) {
4347 Value *IncomingTrue = nullptr;
4348 Value *IncomingFalse = nullptr;
4349
4350 if (I->hasOneUse() && isa<InsertElementInst>(*I->user_begin())) {
4351 // If the predicated instruction is feeding an insert-element, move it
4352 // into the Then block; a Phi node will be created for the vector.
4353 InsertElementInst *IEI = cast<InsertElementInst>(*I->user_begin());
4354 IEI->moveBefore(T);
4355 IncomingTrue = IEI; // the new vector with the inserted element.
4356 IncomingFalse = IEI->getOperand(0); // the unmodified vector
4357 } else {
4358 // Phi node will be created for the scalar predicated instruction.
4359 IncomingTrue = &*I;
4360 IncomingFalse = UndefValue::get(I->getType());
4361 }
4362
4363 BasicBlock *PostDom = I->getParent()->getSingleSuccessor();
4364 assert(PostDom && "Then block has multiple successors");
4365 PHINode *Phi =
4366 PHINode::Create(IncomingTrue->getType(), 2, "", &PostDom->front());
4367 IncomingTrue->replaceAllUsesWith(Phi);
4368 Phi->addIncoming(IncomingFalse, Head);
4369 Phi->addIncoming(IncomingTrue, I->getParent());
4370 }
4371 }
4372
4373 DEBUG(DT->verifyDomTree());
4374 }
4375
4376 InnerLoopVectorizer::VectorParts
4377 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
4378 assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
4379
4380 // Look for cached value.
4381 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
4382 EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge);
4383 if (ECEntryIt != MaskCache.end())
4384 return ECEntryIt->second;
4385
4386 VectorParts SrcMask = createBlockInMask(Src);
4387
4388 // The terminator has to be a branch inst!
4389 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
4390 assert(BI && "Unexpected terminator found");
4391
4392 if (BI->isConditional()) {
4393 VectorParts EdgeMask = getVectorValue(BI->getCondition());
4394
4395 if (BI->getSuccessor(0) != Dst)
4396 for (unsigned part = 0; part < UF; ++part)
4397 EdgeMask[part] = Builder.CreateNot(EdgeMask[part]);
4398
4399 for (unsigned part = 0; part < UF; ++part)
4400 EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]);
4401
4402 MaskCache[Edge] = EdgeMask;
4403 return EdgeMask;
4404 }
4405
4406 MaskCache[Edge] = SrcMask;
4407 return SrcMask;
4408 }
4409
4410 InnerLoopVectorizer::VectorParts
4411 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
4412 assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
4413
4414 // Loop incoming mask is all-one.
4415 if (OrigLoop->getHeader() == BB) {
4416 Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1);
4417 return getVectorValue(C);
4418 }
4419
4420 // This is the block mask. We OR together the masks of all incoming edges,
4420 // starting with an all-zero mask.
4421 Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0);
4422 VectorParts BlockMask = getVectorValue(Zero);
4423
4424 // For each pred:
4425 for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) {
4426 VectorParts EM = createEdgeMask(*it, BB);
4427 for (unsigned part = 0; part < UF; ++part)
4428 BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]);
4429 }
4430
4431 return BlockMask;
4432 }
4433
4434 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
4435 unsigned VF, PhiVector *PV) {
4436 PHINode *P = cast<PHINode>(PN);
4437 // Handle recurrences.
4438 if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4439 VectorParts Entry(UF);
4440 for (unsigned part = 0; part < UF; ++part) {
4441 // This is phase one of vectorizing PHIs.
4442 Type *VecTy =
4443 (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4444 Entry[part] = PHINode::Create(
4445 VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4446 }
4447 VectorLoopValueMap.initVector(P, Entry);
4448 PV->push_back(P);
4449 return;
4450 }
4451
4452 setDebugLocFromInst(Builder, P);
4453 // Check for PHI nodes that are lowered to vector selects.
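// As an illustrative note (not from the original source) on the edge masks
// computed by createEdgeMask above: for an if-converted diamond in which
// %for.body branches on %c to %if.then or %if.else, each unroll part gets
//
//   EdgeMask(for.body -> if.then) = BlockMask(for.body) & %c.vec
//   EdgeMask(for.body -> if.else) = BlockMask(for.body) & ~%c.vec
//
// These are the masks that drive the selects generated for such phis below.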
4454 if (P->getParent() != OrigLoop->getHeader()) { 4455 // We know that all PHIs in non-header blocks are converted into 4456 // selects, so we don't have to worry about the insertion order and we 4457 // can just use the builder. 4458 // At this point we generate the predication tree. There may be 4459 // duplications since this is a simple recursive scan, but future 4460 // optimizations will clean it up. 4461 4462 unsigned NumIncoming = P->getNumIncomingValues(); 4463 4464 // Generate a sequence of selects of the form: 4465 // SELECT(Mask3, In3, 4466 // SELECT(Mask2, In2, 4467 // ( ...))) 4468 VectorParts Entry(UF); 4469 for (unsigned In = 0; In < NumIncoming; In++) { 4470 VectorParts Cond = 4471 createEdgeMask(P->getIncomingBlock(In), P->getParent()); 4472 const VectorParts &In0 = getVectorValue(P->getIncomingValue(In)); 4473 4474 for (unsigned part = 0; part < UF; ++part) { 4475 // We might have single edge PHIs (blocks) - use an identity 4476 // 'select' for the first PHI operand. 4477 if (In == 0) 4478 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]); 4479 else 4480 // Select between the current value and the previous incoming edge 4481 // based on the incoming mask. 4482 Entry[part] = Builder.CreateSelect(Cond[part], In0[part], Entry[part], 4483 "predphi"); 4484 } 4485 } 4486 VectorLoopValueMap.initVector(P, Entry); 4487 return; 4488 } 4489 4490 // This PHINode must be an induction variable. 4491 // Make sure that we know about it. 4492 assert(Legal->getInductionVars()->count(P) && "Not an induction variable"); 4493 4494 InductionDescriptor II = Legal->getInductionVars()->lookup(P); 4495 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4496 4497 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4498 // which can be found from the original scalar operations. 4499 switch (II.getKind()) { 4500 case InductionDescriptor::IK_NoInduction: 4501 llvm_unreachable("Unknown induction"); 4502 case InductionDescriptor::IK_IntInduction: 4503 return widenIntInduction(P); 4504 case InductionDescriptor::IK_PtrInduction: { 4505 // Handle the pointer induction variable case. 4506 assert(P->getType()->isPointerTy() && "Unexpected type."); 4507 // This is the normalized GEP that starts counting at zero. 4508 Value *PtrInd = Induction; 4509 PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType()); 4510 // Determine the number of scalars we need to generate for each unroll 4511 // iteration. If the instruction is uniform, we only need to generate the 4512 // first lane. Otherwise, we generate all VF values. 4513 unsigned Lanes = Legal->isUniformAfterVectorization(P) ? 1 : VF; 4514 // These are the scalar results. Notice that we don't generate vector GEPs 4515 // because scalar GEPs result in better code. 
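// For illustration (an editorial sketch, not from the original source): for a
// pointer induction over i32 with unit step, VF = 4 and UF = 2, the loop
// below emits eight scalar GEPs per vectorized iteration, equivalent to
//
//   %next.gep   = getelementptr i32, i32* %base, i64 (%index + 0)
//   ...
//   %next.gep.7 = getelementptr i32, i32* %base, i64 (%index + 7)
//
// unless the pointer is uniform after vectorization, in which case only lane
// 0 of each unroll part is created.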
4516 ScalarParts Entry(UF); 4517 for (unsigned Part = 0; Part < UF; ++Part) { 4518 Entry[Part].resize(VF); 4519 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4520 Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF); 4521 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4522 Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL); 4523 SclrGep->setName("next.gep"); 4524 Entry[Part][Lane] = SclrGep; 4525 } 4526 } 4527 VectorLoopValueMap.initScalar(P, Entry); 4528 return; 4529 } 4530 case InductionDescriptor::IK_FpInduction: { 4531 assert(P->getType() == II.getStartValue()->getType() && 4532 "Types must match"); 4533 // Handle other induction variables that are now based on the 4534 // canonical one. 4535 assert(P != OldInduction && "Primary induction can be integer only"); 4536 4537 Value *V = Builder.CreateCast(Instruction::SIToFP, Induction, P->getType()); 4538 V = II.transform(Builder, V, PSE.getSE(), DL); 4539 V->setName("fp.offset.idx"); 4540 4541 // Now we have scalar op: %fp.offset.idx = StartVal +/- Induction*StepVal 4542 4543 Value *Broadcasted = getBroadcastInstrs(V); 4544 // After broadcasting the induction variable we need to make the vector 4545 // consecutive by adding StepVal*0, StepVal*1, StepVal*2, etc. 4546 Value *StepVal = cast<SCEVUnknown>(II.getStep())->getValue(); 4547 VectorParts Entry(UF); 4548 for (unsigned part = 0; part < UF; ++part) 4549 Entry[part] = getStepVector(Broadcasted, VF * part, StepVal, 4550 II.getInductionOpcode()); 4551 VectorLoopValueMap.initVector(P, Entry); 4552 return; 4553 } 4554 } 4555 } 4556 4557 /// A helper function for checking whether an integer division-related 4558 /// instruction may divide by zero (in which case it must be predicated if 4559 /// executed conditionally in the scalar code). 4560 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4561 /// Non-zero divisors that are non compile-time constants will not be 4562 /// converted into multiplication, so we will still end up scalarizing 4563 /// the division, but can do so w/o predication. 4564 static bool mayDivideByZero(Instruction &I) { 4565 assert((I.getOpcode() == Instruction::UDiv || 4566 I.getOpcode() == Instruction::SDiv || 4567 I.getOpcode() == Instruction::URem || 4568 I.getOpcode() == Instruction::SRem) && 4569 "Unexpected instruction"); 4570 Value *Divisor = I.getOperand(1); 4571 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4572 return !CInt || CInt->isZero(); 4573 } 4574 4575 void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) { 4576 // For each instruction in the old loop. 4577 for (Instruction &I : *BB) { 4578 4579 // If the instruction will become trivially dead when vectorized, we don't 4580 // need to generate it. 4581 if (DeadInstructions.count(&I)) 4582 continue; 4583 4584 // Scalarize instructions that should remain scalar after vectorization. 4585 if (!(isa<BranchInst>(&I) || isa<PHINode>(&I) || 4586 isa<DbgInfoIntrinsic>(&I)) && 4587 Legal->isScalarAfterVectorization(&I)) { 4588 scalarizeInstruction(&I); 4589 continue; 4590 } 4591 4592 switch (I.getOpcode()) { 4593 case Instruction::Br: 4594 // Nothing to do for PHIs and BR, since we already took care of the 4595 // loop control flow instructions. 4596 continue; 4597 case Instruction::PHI: { 4598 // Vectorize PHINodes. 4599 widenPHIInstruction(&I, UF, VF, PV); 4600 continue; 4601 } // End of PHI. 
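// An illustrative case for the scalar-with-predication path below (not from
// the original source): in
//
//   for (int i = 0; i < n; ++i)
//     if (b[i] != 0)
//       a[i] /= b[i];
//
// the sdiv only executes when b[i] is non-zero. Executing it unconditionally
// on all vector lanes could divide by zero, so it is scalarized and kept
// under its predicate rather than widened.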
4602 4603 case Instruction::UDiv: 4604 case Instruction::SDiv: 4605 case Instruction::SRem: 4606 case Instruction::URem: 4607 // Scalarize with predication if this instruction may divide by zero and 4608 // block execution is conditional, otherwise fallthrough. 4609 if (Legal->isScalarWithPredication(&I)) { 4610 scalarizeInstruction(&I, true); 4611 continue; 4612 } 4613 case Instruction::Add: 4614 case Instruction::FAdd: 4615 case Instruction::Sub: 4616 case Instruction::FSub: 4617 case Instruction::Mul: 4618 case Instruction::FMul: 4619 case Instruction::FDiv: 4620 case Instruction::FRem: 4621 case Instruction::Shl: 4622 case Instruction::LShr: 4623 case Instruction::AShr: 4624 case Instruction::And: 4625 case Instruction::Or: 4626 case Instruction::Xor: { 4627 // Just widen binops. 4628 auto *BinOp = cast<BinaryOperator>(&I); 4629 setDebugLocFromInst(Builder, BinOp); 4630 const VectorParts &A = getVectorValue(BinOp->getOperand(0)); 4631 const VectorParts &B = getVectorValue(BinOp->getOperand(1)); 4632 4633 // Use this vector value for all users of the original instruction. 4634 VectorParts Entry(UF); 4635 for (unsigned Part = 0; Part < UF; ++Part) { 4636 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]); 4637 4638 if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V)) 4639 VecOp->copyIRFlags(BinOp); 4640 4641 Entry[Part] = V; 4642 } 4643 4644 VectorLoopValueMap.initVector(&I, Entry); 4645 addMetadata(Entry, BinOp); 4646 break; 4647 } 4648 case Instruction::Select: { 4649 // Widen selects. 4650 // If the selector is loop invariant we can create a select 4651 // instruction with a scalar condition. Otherwise, use vector-select. 4652 auto *SE = PSE.getSE(); 4653 bool InvariantCond = 4654 SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop); 4655 setDebugLocFromInst(Builder, &I); 4656 4657 // The condition can be loop invariant but still defined inside the 4658 // loop. This means that we can't just use the original 'cond' value. 4659 // We have to take the 'vectorized' value and pick the first lane. 4660 // Instcombine will make this a no-op. 4661 const VectorParts &Cond = getVectorValue(I.getOperand(0)); 4662 const VectorParts &Op0 = getVectorValue(I.getOperand(1)); 4663 const VectorParts &Op1 = getVectorValue(I.getOperand(2)); 4664 4665 auto *ScalarCond = getScalarValue(I.getOperand(0), 0, 0); 4666 4667 VectorParts Entry(UF); 4668 for (unsigned Part = 0; Part < UF; ++Part) { 4669 Entry[Part] = Builder.CreateSelect( 4670 InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]); 4671 } 4672 4673 VectorLoopValueMap.initVector(&I, Entry); 4674 addMetadata(Entry, &I); 4675 break; 4676 } 4677 4678 case Instruction::ICmp: 4679 case Instruction::FCmp: { 4680 // Widen compares. Generate vector compares. 
4681 bool FCmp = (I.getOpcode() == Instruction::FCmp);
4682 auto *Cmp = dyn_cast<CmpInst>(&I);
4683 setDebugLocFromInst(Builder, Cmp);
4684 const VectorParts &A = getVectorValue(Cmp->getOperand(0));
4685 const VectorParts &B = getVectorValue(Cmp->getOperand(1));
4686 VectorParts Entry(UF);
4687 for (unsigned Part = 0; Part < UF; ++Part) {
4688 Value *C = nullptr;
4689 if (FCmp) {
4690 C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
4691 cast<FCmpInst>(C)->copyFastMathFlags(Cmp);
4692 } else {
4693 C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]);
4694 }
4695 Entry[Part] = C;
4696 }
4697
4698 VectorLoopValueMap.initVector(&I, Entry);
4699 addMetadata(Entry, &I);
4700 break;
4701 }
4702
4703 case Instruction::Store:
4704 case Instruction::Load:
4705 vectorizeMemoryInstruction(&I);
4706 break;
4707 case Instruction::ZExt:
4708 case Instruction::SExt:
4709 case Instruction::FPToUI:
4710 case Instruction::FPToSI:
4711 case Instruction::FPExt:
4712 case Instruction::PtrToInt:
4713 case Instruction::IntToPtr:
4714 case Instruction::SIToFP:
4715 case Instruction::UIToFP:
4716 case Instruction::Trunc:
4717 case Instruction::FPTrunc:
4718 case Instruction::BitCast: {
4719 auto *CI = dyn_cast<CastInst>(&I);
4720 setDebugLocFromInst(Builder, CI);
4721
4722 // Optimize the special case where the source is a constant integer
4723 // induction variable. Notice that we can only optimize the 'trunc' case
4724 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
4725 // (c) other casts depend on pointer size.
4726 auto ID = Legal->getInductionVars()->lookup(OldInduction);
4727 if (isa<TruncInst>(CI) && CI->getOperand(0) == OldInduction &&
4728 ID.getConstIntStepValue()) {
4729 widenIntInduction(OldInduction, cast<TruncInst>(CI));
4730 break;
4731 }
4732
4733 // Vectorize casts.
4734 Type *DestTy =
4735 (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
4736
4737 const VectorParts &A = getVectorValue(CI->getOperand(0));
4738 VectorParts Entry(UF);
4739 for (unsigned Part = 0; Part < UF; ++Part)
4740 Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy);
4741 VectorLoopValueMap.initVector(&I, Entry);
4742 addMetadata(Entry, &I);
4743 break;
4744 }
4745
4746 case Instruction::Call: {
4747 // Ignore dbg intrinsics.
4748 if (isa<DbgInfoIntrinsic>(I))
4749 break;
4750 setDebugLocFromInst(Builder, &I);
4751
4752 Module *M = BB->getParent()->getParent();
4753 auto *CI = cast<CallInst>(&I);
4754
4755 StringRef FnName = CI->getCalledFunction()->getName();
4756 Function *F = CI->getCalledFunction();
4757 Type *RetTy = ToVectorTy(CI->getType(), VF);
4758 SmallVector<Type *, 4> Tys;
4759 for (Value *ArgOperand : CI->arg_operands())
4760 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4761
4762 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4763 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
4764 ID == Intrinsic::lifetime_start)) {
4765 scalarizeInstruction(&I);
4766 break;
4767 }
4768 // The flag shows whether we use an intrinsic or a regular call for the
4769 // vectorized version of the instruction.
4770 // Is it beneficial to perform the intrinsic call compared to the lib call?
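// A hypothetical example of the cost query below (not from the original
// source): for a call to sinf with VF = 4, getVectorCallCost prices a
// vectorized library routine (if TLI provides one), while
// getVectorIntrinsicCost prices the llvm.sin.v4f32 intrinsic; the cheaper
// alternative is chosen, and scalarization is the fallback when neither is
// profitable.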
4771 bool NeedToScalarize; 4772 unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize); 4773 bool UseVectorIntrinsic = 4774 ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost; 4775 if (!UseVectorIntrinsic && NeedToScalarize) { 4776 scalarizeInstruction(&I); 4777 break; 4778 } 4779 4780 VectorParts Entry(UF); 4781 for (unsigned Part = 0; Part < UF; ++Part) { 4782 SmallVector<Value *, 4> Args; 4783 for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) { 4784 Value *Arg = CI->getArgOperand(i); 4785 // Some intrinsics have a scalar argument - don't replace it with a 4786 // vector. 4787 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) { 4788 const VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i)); 4789 Arg = VectorArg[Part]; 4790 } 4791 Args.push_back(Arg); 4792 } 4793 4794 Function *VectorF; 4795 if (UseVectorIntrinsic) { 4796 // Use vector version of the intrinsic. 4797 Type *TysForDecl[] = {CI->getType()}; 4798 if (VF > 1) 4799 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 4800 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4801 } else { 4802 // Use vector version of the library call. 4803 StringRef VFnName = TLI->getVectorizedFunction(FnName, VF); 4804 assert(!VFnName.empty() && "Vector function name is empty."); 4805 VectorF = M->getFunction(VFnName); 4806 if (!VectorF) { 4807 // Generate a declaration 4808 FunctionType *FTy = FunctionType::get(RetTy, Tys, false); 4809 VectorF = 4810 Function::Create(FTy, Function::ExternalLinkage, VFnName, M); 4811 VectorF->copyAttributesFrom(F); 4812 } 4813 } 4814 assert(VectorF && "Can't create vector function."); 4815 4816 SmallVector<OperandBundleDef, 1> OpBundles; 4817 CI->getOperandBundlesAsDefs(OpBundles); 4818 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4819 4820 if (isa<FPMathOperator>(V)) 4821 V->copyFastMathFlags(CI); 4822 4823 Entry[Part] = V; 4824 } 4825 4826 VectorLoopValueMap.initVector(&I, Entry); 4827 addMetadata(Entry, &I); 4828 break; 4829 } 4830 4831 default: 4832 // All other instructions are unsupported. Scalarize them. 4833 scalarizeInstruction(&I); 4834 break; 4835 } // end of switch. 4836 } // end of for_each instr. 4837 } 4838 4839 void InnerLoopVectorizer::updateAnalysis() { 4840 // Forget the original basic block. 4841 PSE.getSE()->forgetLoop(OrigLoop); 4842 4843 // Update the dominator tree information. 4844 assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) && 4845 "Entry does not dominate exit."); 4846 4847 // We don't predicate stores by this point, so the vector body should be a 4848 // single loop. 4849 DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader); 4850 4851 DT->addNewBlock(LoopMiddleBlock, LoopVectorBody); 4852 DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]); 4853 DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader); 4854 DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]); 4855 4856 DEBUG(DT->verifyDomTree()); 4857 } 4858 4859 /// \brief Check whether it is safe to if-convert this phi node. 4860 /// 4861 /// Phi nodes with constant expressions that can trap are not safe to if 4862 /// convert. 
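/// For example (an editorial illustration, not from the original source), a
/// phi with an incoming constant expression such as
///
///   %p = phi i32 [ sdiv (i32 1, i32 ptrtoint (i32* @g to i32)), %bb ], ...
///
/// may trap when speculated (the divisor is not known to be non-zero), so
/// such phis are rejected for if-conversion.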
4863 static bool canIfConvertPHINodes(BasicBlock *BB) {
4864 for (Instruction &I : *BB) {
4865 auto *Phi = dyn_cast<PHINode>(&I);
4866 if (!Phi)
4867 return true;
4868 for (Value *V : Phi->incoming_values())
4869 if (auto *C = dyn_cast<Constant>(V))
4870 if (C->canTrap())
4871 return false;
4872 }
4873 return true;
4874 }
4875
4876 bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
4877 if (!EnableIfConversion) {
4878 ORE->emit(createMissedAnalysis("IfConversionDisabled")
4879 << "if-conversion is disabled");
4880 return false;
4881 }
4882
4883 assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
4884
4885 // A list of pointers that we can safely read and write to.
4886 SmallPtrSet<Value *, 8> SafePointers;
4887
4888 // Collect safe addresses.
4889 for (BasicBlock *BB : TheLoop->blocks()) {
4890 if (blockNeedsPredication(BB))
4891 continue;
4892
4893 for (Instruction &I : *BB)
4894 if (auto *Ptr = getPointerOperand(&I))
4895 SafePointers.insert(Ptr);
4896 }
4897
4898 // Collect the blocks that need predication.
4899 BasicBlock *Header = TheLoop->getHeader();
4900 for (BasicBlock *BB : TheLoop->blocks()) {
4901 // We don't support switch statements inside loops.
4902 if (!isa<BranchInst>(BB->getTerminator())) {
4903 ORE->emit(createMissedAnalysis("LoopContainsSwitch", BB->getTerminator())
4904 << "loop contains a switch statement");
4905 return false;
4906 }
4907
4908 // We must be able to predicate all blocks that need to be predicated.
4909 if (blockNeedsPredication(BB)) {
4910 if (!blockCanBePredicated(BB, SafePointers)) {
4911 ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
4912 << "control flow cannot be substituted for a select");
4913 return false;
4914 }
4915 } else if (BB != Header && !canIfConvertPHINodes(BB)) {
4916 ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
4917 << "control flow cannot be substituted for a select");
4918 return false;
4919 }
4920 }
4921
4922 // We can if-convert this loop.
4923 return true;
4924 }
4925
4926 bool LoopVectorizationLegality::canVectorize() {
4927 // We must have a loop in canonical form. Loops with indirectbr in them cannot
4928 // be canonicalized.
4929 if (!TheLoop->getLoopPreheader()) {
4930 ORE->emit(createMissedAnalysis("CFGNotUnderstood")
4931 << "loop control flow is not understood by vectorizer");
4932 return false;
4933 }
4934
4935 // FIXME: This code is currently dead, since any loop that gets sent to
4936 // LoopVectorizationLegality is already an innermost loop.
4937 //
4938 // We can only vectorize innermost loops.
4939 if (!TheLoop->empty()) {
4940 ORE->emit(createMissedAnalysis("NotInnermostLoop")
4941 << "loop is not the innermost loop");
4942 return false;
4943 }
4944
4945 // We must have a single backedge.
4946 if (TheLoop->getNumBackEdges() != 1) {
4947 ORE->emit(createMissedAnalysis("CFGNotUnderstood")
4948 << "loop control flow is not understood by vectorizer");
4949 return false;
4950 }
4951
4952 // We must have a single exiting block.
4953 if (!TheLoop->getExitingBlock()) {
4954 ORE->emit(createMissedAnalysis("CFGNotUnderstood")
4955 << "loop control flow is not understood by vectorizer");
4956 return false;
4957 }
4958
4959 // We only handle bottom-tested loops, i.e. loops in which the condition is
4960 // checked at the end of each iteration. With that we can assume that all
4961 // instructions in the loop are executed the same number of times.
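// For example (an editorial illustration, not from the original source), a
// rotated loop of the form
//
//   do { body; } while (++i < n);
//
// is bottom-tested: its latch is also its exiting block. A loop that still
// tests the condition in the header before executing the body is rejected by
// the check below.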
4962 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
4963 ORE->emit(createMissedAnalysis("CFGNotUnderstood")
4964 << "loop control flow is not understood by vectorizer");
4965 return false;
4966 }
4967
4968 // We need to have a loop header.
4969 DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
4970 << '\n');
4971
4972 // Check if we can if-convert non-single-bb loops.
4973 unsigned NumBlocks = TheLoop->getNumBlocks();
4974 if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
4975 DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
4976 return false;
4977 }
4978
4979 // ScalarEvolution needs to be able to find the exit count.
4980 const SCEV *ExitCount = PSE.getBackedgeTakenCount();
4981 if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
4982 ORE->emit(createMissedAnalysis("CantComputeNumberOfIterations")
4983 << "could not determine number of loop iterations");
4984 DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
4985 return false;
4986 }
4987
4988 // Check if we can vectorize the instructions and CFG in this loop.
4989 if (!canVectorizeInstrs()) {
4990 DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
4991 return false;
4992 }
4993
4994 // Go over each instruction and look at memory deps.
4995 if (!canVectorizeMemory()) {
4996 DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
4997 return false;
4998 }
4999
5000 DEBUG(dbgs() << "LV: We can vectorize this loop"
5001 << (LAI->getRuntimePointerChecking()->Need
5002 ? " (with a runtime bound check)"
5003 : "")
5004 << "!\n");
5005
5006 bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
5007
5008 // If an override option has been passed in for interleaved accesses, use it.
5009 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
5010 UseInterleaved = EnableInterleavedMemAccesses;
5011
5012 // Analyze interleaved memory accesses.
5013 if (UseInterleaved)
5014 InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());
5015
5016 // Collect all instructions that are known to be uniform after vectorization.
5017 collectLoopUniforms();
5018
5019 // Collect all instructions that are known to be scalar after vectorization.
5020 collectLoopScalars();
5021
5022 unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
5023 if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
5024 SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;
5025
5026 if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
5027 ORE->emit(createMissedAnalysis("TooManySCEVRunTimeChecks")
5028 << "Too many SCEV assumptions need to be made and checked "
5029 << "at runtime");
5030 DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
5031 return false;
5032 }
5033
5034 // Okay! We can vectorize. At this point we don't have any other mem analysis
5035 // which may limit our maximum vectorization factor, so just return true with
5036 // no restrictions.
5037 return true;
5038 }
5039
5040 static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
5041 if (Ty->isPointerTy())
5042 return DL.getIntPtrType(Ty);
5043
5044 // It is possible that chars or shorts overflow when we ask for the loop's
5045 // trip count; work around this by changing the type size.
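// For example (an editorial illustration, not from the original source), a
// short induction variable stepping to 40000 overflows its signed 16-bit
// range, so trip-count arithmetic is performed in a widened 32-bit type
// instead.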
5046 if (Ty->getScalarSizeInBits() < 32) 5047 return Type::getInt32Ty(Ty->getContext()); 5048 5049 return Ty; 5050 } 5051 5052 static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) { 5053 Ty0 = convertPointerToIntegerType(DL, Ty0); 5054 Ty1 = convertPointerToIntegerType(DL, Ty1); 5055 if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits()) 5056 return Ty0; 5057 return Ty1; 5058 } 5059 5060 /// \brief Check that the instruction has outside loop users and is not an 5061 /// identified reduction variable. 5062 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst, 5063 SmallPtrSetImpl<Value *> &AllowedExit) { 5064 // Reduction and Induction instructions are allowed to have exit users. All 5065 // other instructions must not have external users. 5066 if (!AllowedExit.count(Inst)) 5067 // Check that all of the users of the loop are inside the BB. 5068 for (User *U : Inst->users()) { 5069 Instruction *UI = cast<Instruction>(U); 5070 // This user may be a reduction exit value. 5071 if (!TheLoop->contains(UI)) { 5072 DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n'); 5073 return true; 5074 } 5075 } 5076 return false; 5077 } 5078 5079 void LoopVectorizationLegality::addInductionPhi( 5080 PHINode *Phi, const InductionDescriptor &ID, 5081 SmallPtrSetImpl<Value *> &AllowedExit) { 5082 Inductions[Phi] = ID; 5083 Type *PhiTy = Phi->getType(); 5084 const DataLayout &DL = Phi->getModule()->getDataLayout(); 5085 5086 // Get the widest type. 5087 if (!PhiTy->isFloatingPointTy()) { 5088 if (!WidestIndTy) 5089 WidestIndTy = convertPointerToIntegerType(DL, PhiTy); 5090 else 5091 WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy); 5092 } 5093 5094 // Int inductions are special because we only allow one IV. 5095 if (ID.getKind() == InductionDescriptor::IK_IntInduction && 5096 ID.getConstIntStepValue() && 5097 ID.getConstIntStepValue()->isOne() && 5098 isa<Constant>(ID.getStartValue()) && 5099 cast<Constant>(ID.getStartValue())->isNullValue()) { 5100 5101 // Use the phi node with the widest type as induction. Use the last 5102 // one if there are multiple (no good reason for doing this other 5103 // than it is expedient). We've checked that it begins at zero and 5104 // steps by one, so this is a canonical induction variable. 5105 if (!Induction || PhiTy == WidestIndTy) 5106 Induction = Phi; 5107 } 5108 5109 // Both the PHI node itself, and the "post-increment" value feeding 5110 // back into the PHI node may have external users. 5111 AllowedExit.insert(Phi); 5112 AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch())); 5113 5114 DEBUG(dbgs() << "LV: Found an induction variable.\n"); 5115 return; 5116 } 5117 5118 bool LoopVectorizationLegality::canVectorizeInstrs() { 5119 BasicBlock *Header = TheLoop->getHeader(); 5120 5121 // Look for the attribute signaling the absence of NaNs. 5122 Function &F = *Header->getParent(); 5123 HasFunNoNaNAttr = 5124 F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true"; 5125 5126 // For each block in the loop. 5127 for (BasicBlock *BB : TheLoop->blocks()) { 5128 // Scan the instructions in the block and look for hazards. 5129 for (Instruction &I : *BB) { 5130 if (auto *Phi = dyn_cast<PHINode>(&I)) { 5131 Type *PhiTy = Phi->getType(); 5132 // Check that this PHI type is allowed. 
5133 if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
5134 !PhiTy->isPointerTy()) {
5135 ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
5136 << "loop control flow is not understood by vectorizer");
5137 DEBUG(dbgs() << "LV: Found a non-int non-pointer PHI.\n");
5138 return false;
5139 }
5140
5141 // If this PHINode is not in the header block, then we know that we
5142 // can convert it to select during if-conversion. No need to check if
5143 // the PHIs in this block are induction or reduction variables.
5144 if (BB != Header) {
5145 // Check that this instruction has no outside users or is an
5146 // identified reduction value with an outside user.
5147 if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
5148 continue;
5149 ORE->emit(createMissedAnalysis("NeitherInductionNorReduction", Phi)
5150 << "value could not be identified as "
5151 "an induction or reduction variable");
5152 return false;
5153 }
5154
5155 // We only allow if-converted PHIs with exactly two incoming values.
5156 if (Phi->getNumIncomingValues() != 2) {
5157 ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
5158 << "control flow not understood by vectorizer");
5159 DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
5160 return false;
5161 }
5162
5163 RecurrenceDescriptor RedDes;
5164 if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
5165 if (RedDes.hasUnsafeAlgebra())
5166 Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
5167 AllowedExit.insert(RedDes.getLoopExitInstr());
5168 Reductions[Phi] = RedDes;
5169 continue;
5170 }
5171
5172 InductionDescriptor ID;
5173 if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
5174 addInductionPhi(Phi, ID, AllowedExit);
5175 if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
5176 Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
5177 continue;
5178 }
5179
5180 if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) {
5181 FirstOrderRecurrences.insert(Phi);
5182 continue;
5183 }
5184
5185 // As a last resort, coerce the PHI to an AddRec expression
5186 // and re-try classifying it as an induction PHI.
5187 if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
5188 addInductionPhi(Phi, ID, AllowedExit);
5189 continue;
5190 }
5191
5192 ORE->emit(createMissedAnalysis("NonReductionValueUsedOutsideLoop", Phi)
5193 << "value that could not be identified as "
5194 "reduction is used outside the loop");
5195 DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
5196 return false;
5197 } // end of PHI handling
5198
5199 // We handle calls that:
5200 // * Are debug info intrinsics.
5201 // * Have a mapping to an IR intrinsic.
5202 // * Have a vector version available.
5203 auto *CI = dyn_cast<CallInst>(&I);
5204 if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
5205 !isa<DbgInfoIntrinsic>(CI) &&
5206 !(CI->getCalledFunction() && TLI &&
5207 TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
5208 ORE->emit(createMissedAnalysis("CantVectorizeCall", CI)
5209 << "call instruction cannot be vectorized");
5210 DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
5211 return false;
5212 }
5213
5214 // Intrinsics such as powi, cttz and ctlz are legal to vectorize if the
5215 // second argument is the same (i.e.
loop invariant) 5216 if (CI && hasVectorInstrinsicScalarOpd( 5217 getVectorIntrinsicIDForCall(CI, TLI), 1)) { 5218 auto *SE = PSE.getSE(); 5219 if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) { 5220 ORE->emit(createMissedAnalysis("CantVectorizeIntrinsic", CI) 5221 << "intrinsic instruction cannot be vectorized"); 5222 DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n"); 5223 return false; 5224 } 5225 } 5226 5227 // Check that the instruction return type is vectorizable. 5228 // Also, we can't vectorize extractelement instructions. 5229 if ((!VectorType::isValidElementType(I.getType()) && 5230 !I.getType()->isVoidTy()) || 5231 isa<ExtractElementInst>(I)) { 5232 ORE->emit(createMissedAnalysis("CantVectorizeInstructionReturnType", &I) 5233 << "instruction return type cannot be vectorized"); 5234 DEBUG(dbgs() << "LV: Found unvectorizable type.\n"); 5235 return false; 5236 } 5237 5238 // Check that the stored type is vectorizable. 5239 if (auto *ST = dyn_cast<StoreInst>(&I)) { 5240 Type *T = ST->getValueOperand()->getType(); 5241 if (!VectorType::isValidElementType(T)) { 5242 ORE->emit(createMissedAnalysis("CantVectorizeStore", ST) 5243 << "store instruction cannot be vectorized"); 5244 return false; 5245 } 5246 5247 // FP instructions can allow unsafe algebra, thus vectorizable by 5248 // non-IEEE-754 compliant SIMD units. 5249 // This applies to floating-point math operations and calls, not memory 5250 // operations, shuffles, or casts, as they don't change precision or 5251 // semantics. 5252 } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) && 5253 !I.hasUnsafeAlgebra()) { 5254 DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n"); 5255 Hints->setPotentiallyUnsafe(); 5256 } 5257 5258 // Reduction instructions are allowed to have exit users. 5259 // All other instructions must not have external users. 5260 if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) { 5261 ORE->emit(createMissedAnalysis("ValueUsedOutsideLoop", &I) 5262 << "value cannot be used outside the loop"); 5263 return false; 5264 } 5265 5266 } // next instr. 5267 } 5268 5269 if (!Induction) { 5270 DEBUG(dbgs() << "LV: Did not find one integer induction var.\n"); 5271 if (Inductions.empty()) { 5272 ORE->emit(createMissedAnalysis("NoInductionVariable") 5273 << "loop induction variable could not be identified"); 5274 return false; 5275 } 5276 } 5277 5278 // Now we know the widest induction type, check if our found induction 5279 // is the same size. If it's not, unset it here and InnerLoopVectorizer 5280 // will create another. 5281 if (Induction && WidestIndTy != Induction->getType()) 5282 Induction = nullptr; 5283 5284 return true; 5285 } 5286 5287 void LoopVectorizationLegality::collectLoopScalars() { 5288 5289 // If an instruction is uniform after vectorization, it will remain scalar. 5290 Scalars.insert(Uniforms.begin(), Uniforms.end()); 5291 5292 // Collect the getelementptr instructions that will not be vectorized. A 5293 // getelementptr instruction is only vectorized if it is used for a legal 5294 // gather or scatter operation. 
5295 for (auto *BB : TheLoop->blocks())
5296 for (auto &I : *BB) {
5297 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
5298 Scalars.insert(GEP);
5299 continue;
5300 }
5301 auto *Ptr = getPointerOperand(&I);
5302 if (!Ptr)
5303 continue;
5304 auto *GEP = getGEPInstruction(Ptr);
5305 if (GEP && isLegalGatherOrScatter(&I))
5306 Scalars.erase(GEP);
5307 }
5308
5309 // An induction variable will remain scalar if all users of the induction
5310 // variable and induction variable update remain scalar.
5311 auto *Latch = TheLoop->getLoopLatch();
5312 for (auto &Induction : *getInductionVars()) {
5313 auto *Ind = Induction.first;
5314 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5315
5316 // Determine if all users of the induction variable are scalar after
5317 // vectorization.
5318 auto ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
5319 auto *I = cast<Instruction>(U);
5320 return I == IndUpdate || !TheLoop->contains(I) || Scalars.count(I);
5321 });
5322 if (!ScalarInd)
5323 continue;
5324
5325 // Determine if all users of the induction variable update instruction are
5326 // scalar after vectorization.
5327 auto ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
5328 auto *I = cast<Instruction>(U);
5329 return I == Ind || !TheLoop->contains(I) || Scalars.count(I);
5330 });
5331 if (!ScalarIndUpdate)
5332 continue;
5333
5334 // The induction variable and its update instruction will remain scalar.
5335 Scalars.insert(Ind);
5336 Scalars.insert(IndUpdate);
5337 }
5338 }
5339
5340 bool LoopVectorizationLegality::hasConsecutiveLikePtrOperand(Instruction *I) {
5341 if (isAccessInterleaved(I))
5342 return true;
5343 if (auto *Ptr = getPointerOperand(I))
5344 return isConsecutivePtr(Ptr);
5345 return false;
5346 }
5347
5348 bool LoopVectorizationLegality::isScalarWithPredication(Instruction *I) {
5349 if (!blockNeedsPredication(I->getParent()))
5350 return false;
5351 switch (I->getOpcode()) {
5352 default:
5353 break;
5354 case Instruction::Store:
5355 return !isMaskRequired(I);
5356 case Instruction::UDiv:
5357 case Instruction::SDiv:
5358 case Instruction::SRem:
5359 case Instruction::URem:
5360 return mayDivideByZero(*I);
5361 }
5362 return false;
5363 }
5364
5365 bool LoopVectorizationLegality::memoryInstructionMustBeScalarized(
5366 Instruction *I, unsigned VF) {
5367
5368 // If the memory instruction is in an interleaved group, it will be
5369 // vectorized and its pointer will remain uniform.
5370 if (isAccessInterleaved(I))
5371 return false;
5372
5373 // Get and ensure we have a valid memory instruction.
5374 LoadInst *LI = dyn_cast<LoadInst>(I);
5375 StoreInst *SI = dyn_cast<StoreInst>(I);
5376 assert((LI || SI) && "Invalid memory instruction");
5377
5378 // If the pointer operand is uniform (loop invariant), the memory instruction
5379 // will be scalarized.
5380 auto *Ptr = getPointerOperand(I);
5381 if (LI && isUniform(Ptr))
5382 return true;
5383
5384 // If the pointer operand is non-consecutive and neither a gather nor a
5385 // scatter operation is legal, the memory instruction will be scalarized.
5386 if (!isConsecutivePtr(Ptr) && !isLegalGatherOrScatter(I))
5387 return true;
5388
5389 // If the instruction is a store located in a predicated block, it will be
5390 // scalarized.
5391 if (isScalarWithPredication(I))
5392 return true;
5393
5394 // If the instruction's allocated size doesn't equal its type size, it
5395 // requires padding and will be scalarized.
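// For example (an editorial illustration, not from the original source): on
// x86-64, x86_fp80 has a store size of 10 bytes but an alloc size of 16
// bytes, so an array of VF such elements is not bitcast-compatible with a
// vector of VF elements; hasIrregularType detects this case and the access
// is scalarized.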
5396 auto &DL = I->getModule()->getDataLayout(); 5397 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 5398 if (hasIrregularType(ScalarTy, DL, VF)) 5399 return true; 5400 5401 // Otherwise, the memory instruction should be vectorized if the rest of the 5402 // loop is. 5403 return false; 5404 } 5405 5406 void LoopVectorizationLegality::collectLoopUniforms() { 5407 // We now know that the loop is vectorizable! 5408 // Collect instructions inside the loop that will remain uniform after 5409 // vectorization. 5410 5411 // Global values, params and instructions outside of current loop are out of 5412 // scope. 5413 auto isOutOfScope = [&](Value *V) -> bool { 5414 Instruction *I = dyn_cast<Instruction>(V); 5415 return (!I || !TheLoop->contains(I)); 5416 }; 5417 5418 SetVector<Instruction *> Worklist; 5419 BasicBlock *Latch = TheLoop->getLoopLatch(); 5420 5421 // Start with the conditional branch. If the branch condition is an 5422 // instruction contained in the loop that is only used by the branch, it is 5423 // uniform. 5424 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 5425 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) { 5426 Worklist.insert(Cmp); 5427 DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n"); 5428 } 5429 5430 // Holds consecutive and consecutive-like pointers. Consecutive-like pointers 5431 // are pointers that are treated like consecutive pointers during 5432 // vectorization. The pointer operands of interleaved accesses are an 5433 // example. 5434 SmallPtrSet<Instruction *, 8> ConsecutiveLikePtrs; 5435 5436 // Holds pointer operands of instructions that are possibly non-uniform. 5437 SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs; 5438 5439 // Iterate over the instructions in the loop, and collect all 5440 // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible 5441 // that a consecutive-like pointer operand will be scalarized, we collect it 5442 // in PossibleNonUniformPtrs instead. We use two sets here because a single 5443 // getelementptr instruction can be used by both vectorized and scalarized 5444 // memory instructions. For example, if a loop loads and stores from the same 5445 // location, but the store is conditional, the store will be scalarized, and 5446 // the getelementptr won't remain uniform. 5447 for (auto *BB : TheLoop->blocks()) 5448 for (auto &I : *BB) { 5449 5450 // If there's no pointer operand, there's nothing to do. 5451 auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I)); 5452 if (!Ptr) 5453 continue; 5454 5455 // True if all users of Ptr are memory accesses that have Ptr as their 5456 // pointer operand. 5457 auto UsersAreMemAccesses = all_of(Ptr->users(), [&](User *U) -> bool { 5458 return getPointerOperand(U) == Ptr; 5459 }); 5460 5461 // Ensure the memory instruction will not be scalarized, making its 5462 // pointer operand non-uniform. If the pointer operand is used by some 5463 // instruction other than a memory access, we're not going to check if 5464 // that other instruction may be scalarized here. Thus, conservatively 5465 // assume the pointer operand may be non-uniform. 5466 if (!UsersAreMemAccesses || memoryInstructionMustBeScalarized(&I)) 5467 PossibleNonUniformPtrs.insert(Ptr); 5468 5469 // If the memory instruction will be vectorized and its pointer operand 5470 // is consecutive-like, the pointer operand should remain uniform. 
5471 else if (hasConsecutiveLikePtrOperand(&I))
5472 ConsecutiveLikePtrs.insert(Ptr);
5473 }
5474
5475 // Add to the Worklist all consecutive and consecutive-like pointers that
5476 // aren't also identified as possibly non-uniform.
5477 for (auto *V : ConsecutiveLikePtrs)
5478 if (!PossibleNonUniformPtrs.count(V)) {
5479 DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
5480 Worklist.insert(V);
5481 }
5482
5483 // Expand Worklist in topological order: whenever a new instruction
5484 // is added, its users should either be already inside Worklist, or
5485 // out of scope. This ensures a uniform instruction will only be used
5486 // by uniform instructions or out-of-scope instructions.
5487 unsigned idx = 0;
5488 while (idx != Worklist.size()) {
5489 Instruction *I = Worklist[idx++];
5490
5491 for (auto OV : I->operand_values()) {
5492 if (isOutOfScope(OV))
5493 continue;
5494 auto *OI = cast<Instruction>(OV);
5495 if (all_of(OI->users(), [&](User *U) -> bool {
5496 return isOutOfScope(U) || Worklist.count(cast<Instruction>(U));
5497 })) {
5498 Worklist.insert(OI);
5499 DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
5500 }
5501 }
5502 }
5503
5504 // Returns true if Ptr is the pointer operand of a memory access instruction
5505 // I, and I is known to not require scalarization.
5506 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5507 return getPointerOperand(I) == Ptr && !memoryInstructionMustBeScalarized(I);
5508 };
5509
5510 // For an instruction to be added into Worklist above, all its users inside
5511 // the loop should also be in Worklist. However, this condition cannot be
5512 // true for phi nodes that form a cyclic dependence. We must process phi
5513 // nodes separately. An induction variable will remain uniform if all users
5514 // of the induction variable and induction variable update remain uniform.
5515 // The code below handles both pointer and non-pointer induction variables.
5516 for (auto &Induction : Inductions) {
5517 auto *Ind = Induction.first;
5518 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5519
5520 // Determine if all users of the induction variable are uniform after
5521 // vectorization.
5522 auto UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
5523 auto *I = cast<Instruction>(U);
5524 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5525 isVectorizedMemAccessUse(I, Ind);
5526 });
5527 if (!UniformInd)
5528 continue;
5529
5530 // Determine if all users of the induction variable update instruction are
5531 // uniform after vectorization.
5532 auto UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
5533 auto *I = cast<Instruction>(U);
5534 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5535 isVectorizedMemAccessUse(I, IndUpdate);
5536 });
5537 if (!UniformIndUpdate)
5538 continue;
5539
5540 // The induction variable and its update instruction will remain uniform.
5541 Worklist.insert(Ind); 5542 Worklist.insert(IndUpdate); 5543 DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n"); 5544 DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n"); 5545 } 5546 5547 Uniforms.insert(Worklist.begin(), Worklist.end()); 5548 } 5549 5550 bool LoopVectorizationLegality::canVectorizeMemory() { 5551 LAI = &(*GetLAA)(*TheLoop); 5552 InterleaveInfo.setLAI(LAI); 5553 const OptimizationRemarkAnalysis *LAR = LAI->getReport(); 5554 if (LAR) { 5555 OptimizationRemarkAnalysis VR(Hints->vectorizeAnalysisPassName(), 5556 "loop not vectorized: ", *LAR); 5557 ORE->emit(VR); 5558 } 5559 if (!LAI->canVectorizeMemory()) 5560 return false; 5561 5562 if (LAI->hasStoreToLoopInvariantAddress()) { 5563 ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress") 5564 << "write to a loop invariant address could not be vectorized"); 5565 DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n"); 5566 return false; 5567 } 5568 5569 Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks()); 5570 PSE.addPredicate(LAI->getPSE().getUnionPredicate()); 5571 5572 return true; 5573 } 5574 5575 bool LoopVectorizationLegality::isInductionVariable(const Value *V) { 5576 Value *In0 = const_cast<Value *>(V); 5577 PHINode *PN = dyn_cast_or_null<PHINode>(In0); 5578 if (!PN) 5579 return false; 5580 5581 return Inductions.count(PN); 5582 } 5583 5584 bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) { 5585 return FirstOrderRecurrences.count(Phi); 5586 } 5587 5588 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) { 5589 return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT); 5590 } 5591 5592 bool LoopVectorizationLegality::blockCanBePredicated( 5593 BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) { 5594 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel(); 5595 5596 for (Instruction &I : *BB) { 5597 // Check that we don't have a constant expression that can trap as operand. 5598 for (Value *Operand : I.operands()) { 5599 if (auto *C = dyn_cast<Constant>(Operand)) 5600 if (C->canTrap()) 5601 return false; 5602 } 5603 // We might be able to hoist the load. 5604 if (I.mayReadFromMemory()) { 5605 auto *LI = dyn_cast<LoadInst>(&I); 5606 if (!LI) 5607 return false; 5608 if (!SafePtrs.count(LI->getPointerOperand())) { 5609 if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) || 5610 isLegalMaskedGather(LI->getType())) { 5611 MaskedOp.insert(LI); 5612 continue; 5613 } 5614 // !llvm.mem.parallel_loop_access implies if-conversion safety. 5615 if (IsAnnotatedParallel) 5616 continue; 5617 return false; 5618 } 5619 } 5620 5621 if (I.mayWriteToMemory()) { 5622 auto *SI = dyn_cast<StoreInst>(&I); 5623 // We only support predication of stores in basic blocks with one 5624 // predecessor. 5625 if (!SI) 5626 return false; 5627 5628 // Build a masked store if it is legal for the target. 
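// For illustration (an editorial sketch, not from the original source): with
// VF = 4, a predicated i32 store that the target supports becomes a masked
// intrinsic along the lines of
//
//   call void @llvm.masked.store.v4i32(<4 x i32> %val, <4 x i32>* %ptr,
//                                      i32 4, <4 x i1> %mask)
//
// so no scalar control flow is needed for the inactive lanes.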
5629 if (isLegalMaskedStore(SI->getValueOperand()->getType(), 5630 SI->getPointerOperand()) || 5631 isLegalMaskedScatter(SI->getValueOperand()->getType())) { 5632 MaskedOp.insert(SI); 5633 continue; 5634 } 5635 5636 bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0); 5637 bool isSinglePredecessor = SI->getParent()->getSinglePredecessor(); 5638 5639 if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr || 5640 !isSinglePredecessor) 5641 return false; 5642 } 5643 if (I.mayThrow()) 5644 return false; 5645 } 5646 5647 return true; 5648 } 5649 5650 void InterleavedAccessInfo::collectConstStrideAccesses( 5651 MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo, 5652 const ValueToValueMap &Strides) { 5653 5654 auto &DL = TheLoop->getHeader()->getModule()->getDataLayout(); 5655 5656 // Since it's desired that the load/store instructions be maintained in 5657 // "program order" for the interleaved access analysis, we have to visit the 5658 // blocks in the loop in reverse postorder (i.e., in a topological order). 5659 // Such an ordering will ensure that any load/store that may be executed 5660 // before a second load/store will precede the second load/store in 5661 // AccessStrideInfo. 5662 LoopBlocksDFS DFS(TheLoop); 5663 DFS.perform(LI); 5664 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) 5665 for (auto &I : *BB) { 5666 auto *LI = dyn_cast<LoadInst>(&I); 5667 auto *SI = dyn_cast<StoreInst>(&I); 5668 if (!LI && !SI) 5669 continue; 5670 5671 Value *Ptr = getPointerOperand(&I); 5672 int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides); 5673 5674 const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 5675 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 5676 uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType()); 5677 5678 // An alignment of 0 means target ABI alignment. 5679 unsigned Align = LI ? LI->getAlignment() : SI->getAlignment(); 5680 if (!Align) 5681 Align = DL.getABITypeAlignment(PtrTy->getElementType()); 5682 5683 AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align); 5684 } 5685 } 5686 5687 // Analyze interleaved accesses and collect them into interleaved load and 5688 // store groups. 5689 // 5690 // When generating code for an interleaved load group, we effectively hoist all 5691 // loads in the group to the location of the first load in program order. When 5692 // generating code for an interleaved store group, we sink all stores to the 5693 // location of the last store. This code motion can change the order of load 5694 // and store instructions and may break dependences. 5695 // 5696 // The code generation strategy mentioned above ensures that we won't violate 5697 // any write-after-read (WAR) dependences. 5698 // 5699 // E.g., for the WAR dependence: a = A[i]; // (1) 5700 // A[i] = b; // (2) 5701 // 5702 // The store group of (2) is always inserted at or below (2), and the load 5703 // group of (1) is always inserted at or above (1). Thus, the instructions will 5704 // never be reordered. All other dependences are checked to ensure the 5705 // correctness of the instruction reordering. 5706 // 5707 // The algorithm visits all memory accesses in the loop in bottom-up program 5708 // order. Program order is established by traversing the blocks in the loop in 5709 // reverse postorder when collecting the accesses. 
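// For example (an editorial illustration, not from the original source), in
// the stride-2 loop
//
//   for (i = 0; i < n; ++i) {
//     x[i] = A[2 * i];
//     y[i] = A[2 * i + 1];
//   }
//
// the loads of A[2 * i] and A[2 * i + 1] form one interleaved load group with
// factor 2: a single wide load of consecutive elements of A followed by
// shufflevectors replaces the two strided accesses.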
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    const ValueToValueMap &Strides) {
  DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup *Group = nullptr;
    if (isStrided(DesB.Stride)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a;  // (1)
      //                    | A[i-1] = b;  // (2) |
      //                      A[i-3] = c;  // (3)
      //                      A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {

        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup *StoreGroup = getInterleaveGroup(A);
          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory())
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // Ignore A if either A or B is in a predicated block. Although we
      // currently prevent group formation for predicated accesses, we may be
      // able to relax this limitation in the future once we handle more
      // complicated blocks.
      if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Align)) {
        DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                     << "    into the interleave group with" << *B << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
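  // Note that a store group must be full: the generated wide store writes
  // every lane of the interleaved vector, so a gap would mean clobbering
  // memory locations that the original loop never wrote.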
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor())
      releaseGroup(Group);

  // If there is a non-reversed interleaved load group with gaps, we will need
  // to execute at least one scalar epilogue iteration. This will ensure that
  // we don't speculatively access memory out-of-bounds. Note that we only need
  // to look for a member at index factor - 1, since every group must have a
  // member at index zero.
  for (InterleaveGroup *Group : LoadGroups)
    if (!Group->getMember(Group->getFactor() - 1)) {
      if (Group->isReverse()) {
        releaseGroup(Group);
      } else {
        DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
        RequiresScalarEpilogue = true;
      }
    }
}

LoopVectorizationCostModel::VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
  // Width 1 means no vectorization.
  VectorizationFactor Factor = {1U, 0U};
  if (OptForSize && Legal->getRuntimePointerChecking()->Need) {
    ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize")
              << "runtime pointer checks needed. Enable vectorization of this "
                 "loop with '#pragma clang loop vectorize(enable)' when "
                 "compiling with -Os/-Oz");
    DEBUG(dbgs()
          << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
    return Factor;
  }

  if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
    ORE->emit(createMissedAnalysis("ConditionalStore")
              << "store that is conditionally executed prevents vectorization");
    DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
    return Factor;
  }

  // Find the trip count.
  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');

  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);
  unsigned MaxSafeDepDist = -1U;

  // Get the maximum safe dependence distance in bits computed by LAA. If the
  // loop contains any interleaved accesses, we divide the dependence distance
  // by the maximum interleave factor of all interleaved groups. Note that
  // although the division ensures correctness, this is a fairly conservative
  // computation because the maximum distance computed by LAA may not involve
  // any of the interleaved accesses.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist =
        Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor();

  WidestRegister =
      ((WidestRegister < MaxSafeDepDist) ? WidestRegister : MaxSafeDepDist);
  unsigned MaxVectorSize = WidestRegister / WidestType;

  DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
               << WidestType << " bits.\n");
  DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister
               << " bits.\n");

  if (MaxVectorSize == 0) {
    DEBUG(dbgs() << "LV: The target has no vector registers.\n");
    MaxVectorSize = 1;
  }

  assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
                                " into one vector!");

  unsigned VF = MaxVectorSize;
  if (MaximizeBandwidth && !OptForSize) {
    // Collect all viable vectorization factors.
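    //
    // As a hypothetical illustration: with a 256-bit widest register, a
    // widest type of i32, and a smallest type of i8, MaxVectorSize is
    // 256 / 32 = 8 and NewMaxVectorSize is 256 / 8 = 32, so the candidate
    // factors below are 8, 16, and 32. The largest candidate whose register
    // usage still fits the target is selected.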
    SmallVector<unsigned, 8> VFs;
    unsigned NewMaxVectorSize = WidestRegister / SmallestType;
    for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2)
      VFs.push_back(VS);

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than existing
    // ones.
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
    for (int i = RUs.size() - 1; i >= 0; --i) {
      if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
        VF = VFs[i];
        break;
      }
    }
  }

  // If we optimize the program for size, avoid creating the tail loop.
  if (OptForSize) {
    // If we are unable to calculate the trip count, then don't try to
    // vectorize.
    if (TC < 2) {
      ORE->emit(
          createMissedAnalysis("UnknownLoopCountComplexCFG")
          << "unable to calculate the loop count due to complex control flow");
      DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
      return Factor;
    }

    // Find the maximum SIMD width that can fit within the trip count.
    VF = TC % MaxVectorSize;

    if (VF == 0)
      VF = MaxVectorSize;
    else {
      // If the trip count that we found modulo the vectorization factor is not
      // zero then we require a tail.
      ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize")
                << "cannot optimize for size and vectorize at the "
                   "same time. Enable vectorization of this loop "
                   "with '#pragma clang loop vectorize(enable)' "
                   "when compiling with -Os/-Oz");
      DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
      return Factor;
    }
  }

  int UserVF = Hints->getWidth();
  if (UserVF != 0) {
    assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
    DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");

    Factor.Width = UserVF;
    return Factor;
  }

  float Cost = expectedCost(1).first;
#ifndef NDEBUG
  const float ScalarCost = Cost;
#endif /* NDEBUG */
  unsigned Width = 1;
  DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  // Ignore scalar width, because the user explicitly wants vectorization.
  if (ForceVectorization && VF > 1) {
    Width = 2;
    Cost = expectedCost(Width).first / (float)Width;
  }

  for (unsigned i = 2; i <= VF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
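    //
    // As a hypothetical illustration: if the scalar loop costs 8 per
    // iteration and the width-4 vector loop costs 20 per vector iteration,
    // the per-lane cost is 20 / 4 = 5, which beats the scalar cost of 8, so
    // width 4 becomes the current best choice.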
    VectorizationCostTy C = expectedCost(i);
    float VectorCost = C.first / (float)i;
    DEBUG(dbgs() << "LV: Vector loop of width " << i
                 << " costs: " << (int)VectorCost << ".\n");
    if (!C.second && !ForceVectorization) {
      DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }
    if (VectorCost < Cost) {
      Cost = VectorCost;
      Width = i;
    }
  }

  DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
        << "LV: Vectorization does not seem to be beneficial, "
        << "but was forced by a user.\n");
  DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
  Factor.Width = Width;
  Factor.Cost = Width * Cost;
  return Factor;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : *BB) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      // Only examine loads, stores, and PHI nodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // consecutive. However, we do want to take consecutive stores/loads of
      // pointer vectors into account.
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I))
        continue;

      MinWidth = std::min(MinWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
      MaxWidth = std::max(MaxWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
    }
  }

  return {MinWidth, MaxWidth};
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
                                                           unsigned VF,
                                                           unsigned LoopCost) {

  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop overhead.
  // There are many micro-architectural considerations that we can't predict
  // at this level. For example, frontend pressure (on decode or fetch) due to
  // code size, or the number and capabilities of the execution ports.
  //
  // We use the following heuristics to select the interleave count:
  // 1. If the code has reductions, then we interleave to break the cross
  //    iteration dependency.
  // 2. If the loop is really small, then we interleave to reduce the loop
  //    overhead.
  // 3. We don't interleave if we think that we will spill registers to memory
  //    due to the increased register pressure.

  // When we optimize for size, we don't interleave.
  if (OptForSize)
    return 1;

  // The maximum safe dependence distance was already used to cap the
  // vectorization factor; interleaving would widen the effective access span
  // further, so don't interleave when such a distance exists.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  // Do not interleave loops with a relatively small trip count.
  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
    return 1;

  unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
  DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
               << " registers\n");

  if (VF == 1) {
    if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumScalarRegs;
  } else {
    if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumVectorRegs;
  }

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these values below, so make sure they are at least one to
  // avoid division by zero.
  R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
  R.NumInstructions = std::max(R.NumInstructions, 1U);

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want a power-of-two interleave count to simplify any
  // addressing operations or alignment considerations.
  unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
                              R.MaxLocalUsers);

  // Don't count the induction variable as interleaved.
  if (EnableIndVarRegisterHeur)
    IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
                       std::max(1U, (R.MaxLocalUsers - 1)));

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);

  // Check if the user has overridden the max.
  if (VF == 1) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If we did not calculate the cost for VF (because the user selected the VF)
  // then we calculate the cost of VF here.
  if (LoopCost == 0)
    LoopCost = expectedCost(VF).first;

  // Clamp the calculated IC to be between 1 and the maximum interleave count
  // that the target allows.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else if (IC < 1)
    IC = 1;

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
  if (VF > 1 && Legal->getReductionVars()->size()) {
    DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF == 1 && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead and
  // potentially expose ILP opportunities.
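  // As a hypothetical illustration of the 5% heuristic below: if
  // SmallLoopCost were 20 and the loop body cost 4, we would interleave up to
  // PowerOf2Floor(20 / 4) = 4 times (subject to the clamped IC above).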
  DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1 and we use the cost model
    // to estimate the cost of the loop and interleave until the cost of the
    // loop overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit, by default, to 2 so
    // that the critical path only gets increased by one reduction operation.
    if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
    return SmallIC;
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  bool HasReductions = (Legal->getReductionVars()->size() > 0);
  if (TTI.enableAggressiveInterleaving(HasReductions)) {
    DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is a
  // very rough estimation. We scan the loop in a topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi-map that
  // holds the list of intervals that *end* at a specific location. This
  // multi-map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it in
  // a set. If we find this value in the multi-map then we remove it from the
  // set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but are
  // used inside the loop. We need this number separately from the max-interval
  // usage number because when we unroll, loop-invariant values do not take
  // more registers.
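  //
  // As a hypothetical illustration: if %a is defined at index 0 and last used
  // at index 5, and %b is defined at index 3 and last used at index 4, then
  // the intervals [0, 5] and [3, 4] overlap at indices 3 and 4, so two values
  // are live simultaneously and the maximum usage is two registers.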
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;
  RU.NumInstructions = 0;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  typedef DenseMap<Instruction *, unsigned> IntervalMap;
  // Maps an instruction's index to the instruction itself.
  DenseMap<unsigned, Instruction *> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
  SmallSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  unsigned Index = 0;
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    RU.NumInstructions += BB->size();
    for (Instruction &I : *BB) {
      IdxToInstr[Index++] = &I;

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = Index;
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  typedef SmallVector<Instruction *, 2> InstrList;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallSet<Instruction *, 8> OpenIntervals;

  // Get the size of the widest register.
  unsigned MaxSafeDepDist = -1U;
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
  unsigned WidestRegister =
      std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);

  DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
    if (Ty->isTokenTy())
      return 0U;
    unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
    return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
  };

  for (unsigned int i = 0; i < Index; ++i) {
    Instruction *I = IdxToInstr[i];
    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // For each VF find the maximum usage of registers.
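    // For example (assuming a hypothetical 128-bit widest register): an i32
    // value at VF = 4 occupies 4 * 32 / 128 = 1 register, an i64 value at
    // VF = 4 occupies 2 registers, and narrower types are rounded up to one.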
    for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
      if (VFs[j] == 1) {
        MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
        continue;
      }

      // Count the number of live intervals.
      unsigned RegUsage = 0;
      for (auto Inst : OpenIntervals) {
        // Skip ignored values for VF > 1.
        if (VecValuesToIgnore.count(Inst))
          continue;
        RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
      }
      MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
    }

    DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
                 << OpenIntervals.size() << '\n');

    // Add the current instruction to the list of open intervals.
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    unsigned Invariant = 0;
    if (VFs[i] == 1)
      Invariant = LoopInvariants.size();
    else {
      for (auto Inst : LoopInvariants)
        Invariant += GetRegUsage(Inst->getType(), VFs[i]);
    }

    DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
    DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
    DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
    DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n');

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::expectedCost(unsigned VF) {
  VectorizationCostTy Cost;

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    VectorizationCostTy BlockCost;

    // For each instruction in the old loop.
    for (Instruction &I : *BB) {
      // Skip dbg intrinsics.
      if (isa<DbgInfoIntrinsic>(I))
        continue;

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      VectorizationCostTy C = getInstructionCost(&I, VF);

      // Check if we should override the cost.
      if (ForceTargetInstructionCost.getNumOccurrences() > 0)
        C.first = ForceTargetInstructionCost;

      BlockCost.first += C.first;
      BlockCost.second |= C.second;
      DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF "
                   << VF << " For instruction: " << I << '\n');
    }

    // If we are vectorizing a predicated block, it will have been
    // if-converted. This means that the block's instructions (aside from
    // stores and instructions that may divide by zero) will now be
    // unconditionally executed. For the scalar case, we may not always execute
    // the predicated block. Thus, scale the block's cost by the probability of
    // executing it.
    if (VF == 1 && Legal->blockNeedsPredication(BB))
      BlockCost.first /= getReciprocalPredBlockProb();

    Cost.first += BlockCost.first;
    Cost.second |= BlockCost.second;
  }

  return Cost;
}

/// \brief Check whether the address computation for a non-consecutive memory
/// access looks like an unlikely candidate for being merged into the
/// addressing mode.
///
/// We look for a GEP which has one index that is an induction variable and all
/// other indices are loop invariant. If the stride of this access is also
/// within a small bound we decide that this address computation can likely be
/// merged into the addressing mode.
/// In all other cases, we identify the address computation as complex.
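///
/// For example, a GEP like 'A[i]' whose only varying index is the induction
/// variable with a small constant step is likely foldable into the addressing
/// mode, whereas a GEP with a loop-varying non-induction index (for instance,
/// a hypothetical 'A[B[i]]') is classified as complex.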
static bool isLikelyComplexAddressComputation(Value *Ptr,
                                              LoopVectorizationLegality *Legal,
                                              ScalarEvolution *SE,
                                              const Loop *TheLoop) {
  auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  if (!Gep)
    return true;

  // We are looking for a GEP with all loop-invariant indices except for one
  // which should be an induction variable.
  unsigned NumOperands = Gep->getNumOperands();
  for (unsigned i = 1; i < NumOperands; ++i) {
    Value *Opd = Gep->getOperand(i);
    if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
        !Legal->isInductionVariable(Opd))
      return true;
  }

  // Now we know we have a GEP ptr, %inv, %ind, %inv. Make sure that the step
  // can likely be merged into the address computation.
  unsigned MaxMergeDistance = 64;

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Ptr));
  if (!AddRec)
    return true;

  // Check that the step is constant.
  const SCEV *Step = AddRec->getStepRecurrence(*SE);
  const auto *C = dyn_cast<SCEVConstant>(Step);
  if (!C)
    return true;

  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return true;

  int64_t StepVal = APStepVal.getSExtValue();

  return StepVal > MaxMergeDistance;
}

static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  return Legal->hasStride(I->getOperand(0)) ||
         Legal->hasStride(I->getOperand(1));
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
  // If we know that this instruction will remain uniform, check the cost of
  // the scalar version.
  if (Legal->isUniformAfterVectorization(I))
    VF = 1;

  Type *VectorTy;
  unsigned C = getInstructionCost(I, VF, VectorTy);

  bool TypeNotScalarized =
      VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF;
  return VectorizationCostTy(C, TypeNotScalarized);
}

unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
                                                        unsigned VF,
                                                        Type *&VectorTy) {
  Type *RetTy = I->getType();
  if (VF > 1 && MinBWs.count(I))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  VectorTy = ToVectorTy(RetTy, VF);
  auto SE = PSE.getSE();

  // TODO: We need to estimate the cost of intrinsic calls.
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    // We mark this instruction as zero-cost because the cost of GEPs in
    // vectorized code depends on whether the corresponding memory instruction
    // is scalarized or not. Therefore, we handle GEPs with the memory
    // instruction cost.
    return 0;
  case Instruction::Br: {
    return TTI.getCFInstrCost(I->getOpcode());
  }
  case Instruction::PHI: {
    auto *Phi = cast<PHINode>(I);

    // First-order recurrences are replaced by vector shuffles inside the loop.
    if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
      return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
                                VectorTy, VF - 1, VectorTy);

    // TODO: IF-converted IFs become selects.
    return 0;
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    // If we have a predicated instruction, it may not be executed for each
    // vector lane. Get the scalarization cost and scale this amount by the
    // probability of executing the predicated block. If the instruction is not
    // predicated, we fall through to the next case.
    if (VF > 1 && Legal->isScalarWithPredication(I)) {
      unsigned Cost = 0;

      // These instructions have a non-void type, so account for the phi nodes
      // that we will create. This cost is likely to be zero. The phi node
      // cost, if any, should be scaled by the block probability because it
      // models a copy at the end of each predicated block.
      Cost += VF * TTI.getCFInstrCost(Instruction::PHI);

      // The cost of the non-predicated instruction.
      Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);

      // The cost of insertelement and extractelement instructions needed for
      // scalarization.
      Cost += getScalarizationOverhead(I, VF, TTI);

      // Scale the cost by the probability of executing the predicated blocks.
      // This assumes the predicated block for each vector lane is equally
      // likely.
      return Cost / getReciprocalPredBlockProb();
    }
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Since we will replace the stride with 1, the multiplication should go
    // away.
    if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
      return 0;
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueProperties Op1VP =
        TargetTransformInfo::OP_None;
    TargetTransformInfo::OperandValueProperties Op2VP =
        TargetTransformInfo::OP_None;
    Value *Op2 = I->getOperand(1);

    // Check for a splat or for a non-uniform vector of constants.
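    // For example (hypothetical IR): in 'shl i32 %x, 4', the second operand
    // is a uniform constant that is a power of two, so Op2VK becomes
    // OK_UniformConstantValue and Op2VP becomes OP_PowerOf2; targets such as
    // x86 can then cost the vectorized shift more cheaply.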
    if (isa<ConstantInt>(Op2)) {
      ConstantInt *CInt = cast<ConstantInt>(Op2);
      if (CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;
      Op2VK = TargetTransformInfo::OK_UniformConstantValue;
    } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
      Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      Constant *SplatValue = cast<Constant>(Op2)->getSplatValue();
      if (SplatValue) {
        ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue);
        if (CInt && CInt->getValue().isPowerOf2())
          Op2VP = TargetTransformInfo::OP_PowerOf2;
        Op2VK = TargetTransformInfo::OK_UniformConstantValue;
      }
    } else if (Legal->isUniform(Op2)) {
      Op2VK = TargetTransformInfo::OK_UniformValue;
    }

    return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, Op2VK,
                                      Op1VP, Op2VP);
  }
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
    bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
    Type *CondTy = SI->getCondition()->getType();
    if (!ScalarCond)
      CondTy = VectorType::get(CondTy, VF);

    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ValTy = I->getOperand(0)->getType();
    Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
    auto It = MinBWs.find(Op0AsInstruction);
    if (VF > 1 && It != MinBWs.end())
      ValTy = IntegerType::get(ValTy->getContext(), It->second);
    VectorTy = ToVectorTy(ValTy, VF);
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy);
  }
  case Instruction::Store:
  case Instruction::Load: {
    StoreInst *SI = dyn_cast<StoreInst>(I);
    LoadInst *LI = dyn_cast<LoadInst>(I);
    Type *ValTy = (SI ? SI->getValueOperand()->getType() : LI->getType());
    VectorTy = ToVectorTy(ValTy, VF);

    unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment();
    unsigned AS =
        SI ? SI->getPointerAddressSpace() : LI->getPointerAddressSpace();
    Value *Ptr = getPointerOperand(I);
    // We add the cost of address computation here instead of with the GEP
    // instruction because only here we know whether the operation is
    // scalarized.
    if (VF == 1)
      return TTI.getAddressComputationCost(VectorTy) +
             TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);

    if (LI && Legal->isUniform(Ptr)) {
      // Scalar load + broadcast.
      unsigned Cost = TTI.getAddressComputationCost(ValTy->getScalarType());
      Cost += TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                                  Alignment, AS);
      return Cost +
             TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, ValTy);
    }

    // For an interleaved access, calculate the total cost of the whole
    // interleave group.
    if (Legal->isAccessInterleaved(I)) {
      auto Group = Legal->getInterleavedAccessGroup(I);
      assert(Group && "Failed to get an interleaved access group.");

      // Only calculate the cost once at the insert position.
      if (Group->getInsertPos() != I)
        return 0;

      unsigned InterleaveFactor = Group->getFactor();
      Type *WideVecTy =
          VectorType::get(VectorTy->getVectorElementType(),
                          VectorTy->getVectorNumElements() * InterleaveFactor);

      // Holds the indices of existing members in an interleaved load group.
      // An interleaved store group doesn't need this as it doesn't allow gaps.
      SmallVector<unsigned, 4> Indices;
      if (LI) {
        for (unsigned i = 0; i < InterleaveFactor; i++)
          if (Group->getMember(i))
            Indices.push_back(i);
      }

      // Calculate the cost of the whole interleaved group.
      unsigned Cost = TTI.getInterleavedMemoryOpCost(
          I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
          Group->getAlignment(), AS);

      if (Group->isReverse())
        Cost +=
            Group->getNumMembers() *
            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);

      // FIXME: An interleaved load group with a huge gap could be even more
      // expensive than scalar operations. Then we could ignore such a group
      // and use scalar operations instead.
      return Cost;
    }

    // Check if the memory instruction will be scalarized.
    if (Legal->memoryInstructionMustBeScalarized(I, VF)) {
      unsigned Cost = 0;
      Type *PtrTy = ToVectorTy(Ptr->getType(), VF);

      // True if the memory instruction's address computation is complex.
      bool IsComplexComputation =
          isLikelyComplexAddressComputation(Ptr, Legal, SE, TheLoop);

      // Get the cost of the scalar memory instruction and address computation.
      Cost += VF * TTI.getAddressComputationCost(PtrTy, IsComplexComputation);
      Cost += VF *
              TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                                  Alignment, AS);

      // Get the overhead of the extractelement and insertelement instructions
      // we might create due to scalarization.
      Cost += getScalarizationOverhead(I, VF, TTI);

      // If we have a predicated store, it may not be executed for each vector
      // lane. Scale the cost by the probability of executing the predicated
      // block.
      if (Legal->isScalarWithPredication(I))
        Cost /= getReciprocalPredBlockProb();

      return Cost;
    }

    // Determine if the pointer operand of the access is either consecutive or
    // reverse consecutive.
    int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
    bool Reverse = ConsecutiveStride < 0;

    // Determine if either a gather or scatter operation is legal.
    bool UseGatherOrScatter =
        !ConsecutiveStride && Legal->isLegalGatherOrScatter(I);

    unsigned Cost = TTI.getAddressComputationCost(VectorTy);
    if (UseGatherOrScatter) {
      assert(ConsecutiveStride == 0 &&
             "Gather/Scatter are not used for consecutive stride");
      return Cost +
             TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
                                        Legal->isMaskRequired(I), Alignment);
    }
    // Wide loads/stores.
    if (Legal->isMaskRequired(I))
      Cost +=
          TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
    else
      Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);

    if (Reverse)
      Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
    return Cost;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    // We optimize the truncation of an induction variable; the cost is the
    // same as that of the scalar operation.
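    // For example (hypothetical IR), 'trunc i64 %iv to i32', where %iv is an
    // induction variable, can be costed as a scalar cast because the
    // vectorizer can generate an appropriately truncated induction directly.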
    if (I->getOpcode() == Instruction::Trunc &&
        Legal->isInductionVariable(I->getOperand(0)))
      return TTI.getCastInstrCost(I->getOpcode(), I->getType(),
                                  I->getOperand(0)->getType());

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF);
    if (VF > 1 && MinBWs.count(I)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (I->getOpcode() == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      } else if (I->getOpcode() == Instruction::ZExt ||
                 I->getOpcode() == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      }
    }

    return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy);
  }
  case Instruction::Call: {
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
    if (getVectorIntrinsicIDForCall(CI, TLI))
      return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
    return CallCost;
  }
  default:
    // The cost of executing VF copies of the scalar instruction. This opcode
    // is unknown. Assume that it is the same as 'mul'.
    return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
           getScalarizationOverhead(I, VF, TTI);
  } // end of switch.
}

char LoopVectorize::ID = 0;
static const char lv_name[] = "Loop Vectorization";
INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)

namespace llvm {
Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
  return new LoopVectorize(NoUnrolling, AlwaysVectorize);
}
}

bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
  // Check if the pointer operand of a load or store instruction is
  // consecutive.
  if (auto *Ptr = getPointerOperand(Inst))
    return Legal->isConsecutivePtr(Ptr);
  return false;
}

void LoopVectorizationCostModel::collectValuesToIgnore() {
  // Ignore ephemeral values.
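  // (Ephemeral values are values that are only used, directly or indirectly,
  // by @llvm.assume calls; they produce no real code after vectorization, so
  // they should not influence the cost model.)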
  CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);

  // Ignore type-promoting instructions we identified during reduction
  // detection.
  for (auto &Reduction : *Legal->getReductionVars()) {
    RecurrenceDescriptor &RedDes = Reduction.second;
    SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }

  // Insert values known to be scalar into VecValuesToIgnore.
  for (auto *BB : TheLoop->getBlocks())
    for (auto &I : *BB)
      if (Legal->isScalarAfterVectorization(&I))
        VecValuesToIgnore.insert(&I);
}

void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
                                             bool IfPredicateInstr) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  // Holds vector parameters or scalars, in case of uniform vals.
  SmallVector<VectorParts, 4> Params;

  setDebugLocFromInst(Builder, Instr);

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  // Initialize a new scalar map entry.
  ScalarParts Entry(UF);

  VectorParts Cond;
  if (IfPredicateInstr)
    Cond = createBlockInMask(Instr->getParent());

  // For each vector unroll 'part':
  for (unsigned Part = 0; Part < UF; ++Part) {
    Entry[Part].resize(1);
    // For each scalar that we create:

    // Start an "if (pred) a[i] = ..." block.
    Value *Cmp = nullptr;
    if (IfPredicateInstr) {
      if (Cond[Part]->getType()->isVectorTy())
        Cond[Part] =
            Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0));
      Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part],
                               ConstantInt::get(Cond[Part]->getType(), 1));
    }

    Instruction *Cloned = Instr->clone();
    if (!IsVoidRetTy)
      Cloned->setName(Instr->getName() + ".cloned");

    // Replace the operands of the cloned instructions with their scalar
    // equivalents in the new loop.
    for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
      auto *NewOp = getScalarValue(Instr->getOperand(op), Part, 0);
      Cloned->setOperand(op, NewOp);
    }

    // Place the cloned scalar in the new loop.
    Builder.Insert(Cloned);

    // Add the cloned scalar to the scalar map entry.
    Entry[Part][0] = Cloned;

    // If we just cloned a new assumption, add it to the assumption cache.
    if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
      if (II->getIntrinsicID() == Intrinsic::assume)
        AC->registerAssumption(II);

    // End if-block.
    if (IfPredicateInstr)
      PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp));
  }
  VectorLoopValueMap.initScalar(Instr, Entry);
}

void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
  auto *SI = dyn_cast<StoreInst>(Instr);
  bool IfPredicateInstr = (SI && Legal->blockNeedsPredication(SI->getParent()));

  return scalarizeInstruction(Instr, IfPredicateInstr);
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
                                        Instruction::BinaryOps BinOp) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
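  // For example (hypothetical values), with Val = %v, StartIdx = 2, and
  // Step = %s, the integer path below emits 'add %v, (mul 2, %s)', named
  // "induction".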
  Type *Ty = Val->getType();
  assert(!Ty->isVectorTy() && "Val must be a scalar");

  if (Ty->isFloatingPointTy()) {
    Constant *C = ConstantFP::get(Ty, (double)StartIdx);

    // Floating-point operations had to be 'fast' to enable the unrolling.
    Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
    return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
  }
  Constant *C = ConstantInt::get(Ty, StartIdx);
  return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
}

static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  SmallVector<Metadata *, 4> MDs;
  // Reserve first location for self reference to the LoopID metadata node.
  MDs.push_back(nullptr);
  bool IsUnrollMetadata = false;
  MDNode *LoopID = L->getLoopID();
  if (LoopID) {
    // First find existing loop unrolling disable metadata.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
      if (MD) {
        const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata =
            S && S->getString().startswith("llvm.loop.unroll.disable");
      }
      MDs.push_back(LoopID->getOperand(i));
    }
  }

  if (!IsUnrollMetadata) {
    // Add runtime unroll disable metadata.
    LLVMContext &Context = L->getHeader()->getContext();
    SmallVector<Metadata *, 1> DisableOperands;
    DisableOperands.push_back(
        MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
    MDNode *DisableNode = MDNode::get(Context, DisableOperands);
    MDs.push_back(DisableNode);
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);
    L->setLoopID(NewLoopID);
  }
}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert(L->empty() && "Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  DEBUG(dbgs() << "\nLV: Checking a loop in \""
               << L->getHeader()->getParent()->getName() << "\" from "
               << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, DisableUnrolling, *ORE);

  DEBUG(dbgs() << "LV: Loop hints:"
               << " force="
               << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                       ? "disabled"
                       : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                              ? "enabled"
                              : "?"))
               << " width=" << Hints.getWidth()
               << " unroll=" << Hints.getInterleave() << "\n");

  // Function containing the loop.
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed report, less
  // verbosely, vectorized loops and unvectorized loops that may benefit from
  // vectorization, respectively.

  if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
    DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  // Check the loop for a trip count threshold:
  // do not vectorize loops with a tiny trip count.
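  // For example, with the default vectorizer-min-trip-count of 16, a loop
  // with a known constant trip count of 8 is rejected here unless
  // vectorization was explicitly forced.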
  const unsigned TC = SE->getSmallConstantTripCount(L);
  if (TC > 0u && TC < TinyTripCountVectorThreshold) {
    DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                 << "This loop is not worth vectorizing.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      DEBUG(dbgs() << "\n");
      ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                     "NotBeneficial", L)
                << "vectorization is not beneficial "
                   "and is not explicitly forced");
      return false;
    }
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements(*ORE);
  LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE,
                                &Requirements, &Hints);
  if (!LVL.canVectorize()) {
    DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints);
  CM.collectValuesToIgnore();

  // Check the function attributes to find out if this function should be
  // optimized for size.
  bool OptForSize =
      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();

  // Compute the weighted frequency of this loop being executed and see if it
  // is less than 20% of the function entry baseline frequency. Note that we
  // always have a canonical loop here because we think we *can* vectorize.
  // FIXME: This is hidden behind a flag due to pervasive problems with
  // exactly what block frequency models.
  if (LoopVectorizeWithBlockFrequency) {
    BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader());
    if (Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
        LoopEntryFreq < ColdEntryFreq)
      OptForSize = true;
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem possibly correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat "
                    "attribute is used.\n");
    ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                   "NoImplicitFloat", L)
              << "loop not vectorized due to NoImplicitFloat attribute");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
    ORE->emit(
        createMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
        << "loop not vectorized due to unsafe FP support.");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Select the optimal vectorization factor.
  const LoopVectorizationCostModel::VectorizationFactor VF =
      CM.selectVectorizationFactor(OptForSize);

  // Select the interleave count.
  unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

  // Get the user interleave count.
  unsigned UserIC = Hints.getInterleave();

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                    "requirements.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  if (VF.Width == 1) {
    DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly
    // disabled.
    DEBUG(dbgs()
          << "LV: Interleaving is beneficial but is explicitly disabled.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if the user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
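    // Both remarks are emitted so that a user passing, e.g.,
    // -Rpass-analysis=loop-vectorize to clang can see why neither
    // transformation was applied.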
    ORE->emit(OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << VecDiagMsg.second);
    ORE->emit(OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << IntDiagMsg.second);
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit(OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << VecDiagMsg.second);
  } else if (VectorizeLoop && !InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    ORE->emit(OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << IntDiagMsg.second);
  } else if (VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  using namespace ore;
  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that it is not profitable to vectorize the loop, then
    // interleave it.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    Unroller.vectorize();

    ORE->emit(OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                 L->getHeader())
              << "interleaved loop (interleave count: "
              << NV("InterleaveCount", IC) << ")");
  } else {
    // If we decided that it is *legal* to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LB.vectorize();
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling of the scalar loop when
    // there are no runtime checks about strides and memory. A scalar loop
    // that is rarely run is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      AddRuntimeUnrollDisableMetaData(L);

    // Report the vectorization decision.
    ORE->emit(OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                 L->getHeader())
              << "vectorized loop (vectorization width: "
              << NV("VectorizationFactor", VF.Width)
              << ", interleave count: " << NV("InterleaveCount", IC) << ")");
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();

  DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_,
    TargetTransformInfo &TTI_, DominatorTree &DT_, BlockFrequencyInfo &BFI_,
    TargetLibraryInfo *TLI_, DemandedBits &DB_, AliasAnalysis &AA_,
    AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_) {

  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;

  // Compute some weights outside of the loop over the loops. Compute this
  // using a BranchProbability to reuse its scaling math.
  const BranchProbability ColdProb(1, 5); // 20%
  ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb;

  // Don't attempt vectorization if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
    return false;

  // Build up a worklist of inner loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    addAcyclicInnerLoop(*L, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops, processing each loop nest in the
  // function.
  bool Changed = false;
  while (!Worklist.empty())
    Changed |= processLoop(Worklist.pop_back_val());

  return Changed;
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    return LAM.getResult<LoopAccessAnalysis>(L);
  };
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, TLI, DB, AA, AC, GetLAA, ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
}
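// Usage note (a sketch, assuming the standard pass registration): with the
// new pass manager the transformation above can be exercised directly, e.g.
//   opt -passes=loop-vectorize -S input.ll
// and the per-loop remarks emitted through ORE can be inspected from clang
// with
//   clang -O2 -Rpass=loop-vectorize -Rpass-missed=loop-vectorize \
//         -Rpass-analysis=loop-vectorize file.c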