//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <map>
#include <tuple>

using namespace llvm;
using namespace llvm::PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// We don't vectorize loops with a known constant trip count below this
/// number.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Don't vectorize loops with a constant "
             "trip count that is smaller than this "
             "value."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
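
// These are hidden cl::opt flags, so they can be exercised directly when
// testing the vectorizer. An illustrative invocation (not taken from this
// file) could look like:
//
//   opt -loop-vectorize -vectorizer-min-trip-count=4 \
//       -enable-interleaved-mem-accesses -S < input.ll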

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(false), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createMissedAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                     Instruction *I = nullptr) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is a debug location attached to the instruction, use it;
    // otherwise fall back to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}
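
// A hedged usage sketch (the remark name and message below are illustrative,
// not taken from this file):
//
//   ORE->emit(createMissedAnalysis(LV_NAME, "NoInductionVariable", TheLoop)
//             << "could not find an induction variable");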

namespace {

// Forward declarations.
class LoopVectorizeHints;
class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class LoopVectorizationRequirements;

/// Returns true if the given loop body has a cycle, excluding the loop
/// itself.
static bool hasCyclesInLoopBody(const Loop &L) {
  if (!L.empty())
    return true;

  for (const auto &SCC :
       make_range(scc_iterator<Loop, LoopBodyTraits>::begin(L),
                  scc_iterator<Loop, LoopBodyTraits>::end(L))) {
    if (SCC.size() > 1) {
      DEBUG(dbgs() << "LVL: Detected a cycle in the loop body:\n");
      DEBUG(L.dump());
      return true;
    }
  }
  return false;
}

/// \brief This modifies LoopAccessReport to initialize the message with the
/// loop-vectorizer-specific part.
class VectorizationReport : public LoopAccessReport {
public:
  VectorizationReport(Instruction *I = nullptr)
      : LoopAccessReport("loop not vectorized: ", I) {}

  /// \brief This allows promotion of the loop-access analysis report into the
  /// loop-vectorizer report. It modifies the message to add the
  /// loop-vectorizer-specific part of the message.
  explicit VectorizationReport(const LoopAccessReport &R)
      : LoopAccessReport(Twine("loop not vectorized: ") + R.str(),
                         R.getInstr()) {}
};

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}

/// A helper function that returns a GEP instruction and knows to skip a
/// 'bitcast'. The 'bitcast' may be skipped if the source and the destination
/// pointee types of the 'bitcast' have the same size.
/// For example:
///   bitcast double** %var to i64* - can be skipped
///   bitcast double** %var to i8*  - cannot
static GetElementPtrInst *getGEPInstruction(Value *Ptr) {
  if (isa<GetElementPtrInst>(Ptr))
    return cast<GetElementPtrInst>(Ptr);

  if (isa<BitCastInst>(Ptr) &&
      isa<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0))) {
    Type *BitcastTy = Ptr->getType();
    Type *GEPTy = cast<BitCastInst>(Ptr)->getSrcTy();
    if (!isa<PointerType>(BitcastTy) || !isa<PointerType>(GEPTy))
      return nullptr;
    Type *Pointee1Ty = cast<PointerType>(BitcastTy)->getPointerElementType();
    Type *Pointee2Ty = cast<PointerType>(GEPTy)->getPointerElementType();
    const DataLayout &DL = cast<BitCastInst>(Ptr)->getModule()->getDataLayout();
    if (DL.getTypeSizeInBits(Pointee1Ty) == DL.getTypeSizeInBits(Pointee2Ty))
      return cast<GetElementPtrInst>(cast<BitCastInst>(Ptr)->getOperand(0));
  }
  return nullptr;
}

/// A helper function that returns the pointer operand of a load or store
/// instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
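
// For example (assuming a typical data layout): i1 has an allocation size of
// 8 bits but a type size of 1 bit, so an array of i1 is padded and the type
// is irregular at VF == 1. At VF == 4, 4 * alloc-size(i1) is 32 bits while
// the store size of <4 x i1> is only 8 bits, so i1 is irregular there too.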

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
///       we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or into multiple
/// scalar copies. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found in the loop.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Induction(nullptr),
        OldInduction(nullptr), VectorLoopValueMap(UnrollFactor, VecWidth),
        TripCount(nullptr), VectorTripCount(nullptr), Legal(LVL), Cost(CM),
        AddedSafetyChecks(false) {}

  // Perform the actual loop widening (vectorization).
  void vectorize() {
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop();
    // Widen each instruction in the old loop to a new one in the new loop.
    vectorizeLoop();
  }

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  virtual ~InnerLoopVectorizer() {}

protected:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode *, 4> PhiVector;

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  typedef SmallVector<Value *, 2> VectorParts;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  typedef SmallVector<SmallVector<Value *, 4>, 2> ScalarParts;

  // When we if-convert we need to create edge masks. We have to cache values
  // so that we don't end up with exponential recursion/IR.
  typedef DenseMap<std::pair<BasicBlock *, BasicBlock *>, VectorParts>
      EdgeMaskCache;

  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop();
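
  // A simplified sketch of the skeleton produced by createEmptyLoop, in terms
  // of the basic-block members declared further down (bypass checks elided):
  //
  //   LoopVectorPreHeader -> LoopVectorBody -> LoopMiddleBlock
  //   LoopMiddleBlock     -> LoopScalarPreHeader or LoopExitBlock
  //   LoopScalarPreHeader -> LoopScalarBody -> LoopExitBlock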

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);
  /// Copy and widen the instructions from the old loop.
  virtual void vectorizeLoop();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// \brief The Loop exit block may have single value PHI nodes where the
  /// incoming value is 'Undef'. While vectorizing we only handled real values
  /// that were defined inside the loop. Here we fix the 'undef case'.
  /// See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Predicate conditional instructions that require predication on their
  /// respective conditions.
  void predicateInstructions();

  /// Collect the instructions from the original loop that would be trivially
  /// dead in the vectorized loop if generated.
  void collectTriviallyDeadInstructions();

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);
  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// A helper function to vectorize a single BB within the innermost loop.
  void vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV);

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF,
                           PhiVector *PV);

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars. If \p IfPredicateInstr is true we need to 'hide' each
  /// scalarized instruction behind an if block predicated on the control
  /// dependence of the instruction.
  virtual void scalarizeInstruction(Instruction *Instr,
                                    bool IfPredicateInstr = false);

  /// Vectorize Load and Store instructions.
  virtual void vectorizeMemoryInstruction(Instruction *Instr);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);
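
  // For example (illustrative IR), broadcasting a scalar %x at VF == 4 uses
  // the usual insertelement/shufflevector splat idiom:
  //
  //   %bc    = insertelement <4 x i32> undef, i32 %x, i32 0
  //   %splat = shufflevector <4 x i32> %bc, <4 x i32> undef,
  //                          <4 x i32> zeroinitializer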

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable (e.g., it
  /// can be a truncate instruction).
  void buildScalarSteps(Value *ScalarIV, Value *Step, Value *EntryVal);

  /// Create a vector induction phi node based on an existing scalar one. This
  /// currently only works for integer induction variables with a constant
  /// step. \p EntryVal is the value from the original loop that maps to the
  /// vector phi node. If \p EntryVal is a truncate instruction, instead of
  /// widening the original IV, we widen a version of the IV truncated to \p
  /// EntryVal's type.
  void createVectorIntInductionPHI(const InductionDescriptor &II,
                                   Instruction *EntryVal);

  /// Widen an integer induction variable \p IV. If \p Trunc is provided, the
  /// induction variable will first be truncated to the corresponding type.
  void widenIntInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// Return a constant reference to the VectorParts corresponding to \p V from
  /// the original loop. If the value has already been vectorized, the
  /// corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// new vector values on-demand by inserting the scalar values into vectors
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into vectors.
  const VectorParts &getVectorValue(Value *V);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part and vector index \p Lane. If the value has
  /// been vectorized but not scalarized, the necessary extractelement
  /// instruction will be generated.
  Value *getScalarValue(Value *V, unsigned Part, unsigned Lane);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Emit a bypass check to see if the trip count would overflow, or we
  /// wouldn't have enough iterations to execute one vector loop.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
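  // An illustrative shape of that check for scalar trip count %tc: branch to
  // the bypass (scalar loop) when %tc < VF * UF, e.g. for VF == 4, UF == 2:
  //
  //   %min.iters.check = icmp ult i64 %tc, 8
  //   br i1 %min.iters.check, label %scalar.ph, label %vector.ph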
  /// Emit a bypass check to see if the vector trip count is nonzero.
  void emitVectorLoopEnteredCheck(Loop *L, BasicBlock *Bypass);
  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
  /// Emit bypass checks to check any memory assumptions we may have made.
  void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// \brief Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// This is a helper class for maintaining vectorization state. It's used for
  /// mapping values from the original loop to their corresponding values in
  /// the new loop. Two mappings are maintained: one for vectorized values and
  /// one for scalarized values. Vectorized values are represented with UF
  /// vector values in the new loop, and scalarized values are represented with
  /// UF x VF scalar values in the new loop. UF and VF are the unroll and
  /// vectorization factors, respectively.
  ///
  /// Entries can be added to either map with initVector and initScalar, which
  /// initialize and return a constant reference to the new entry. If a
  /// non-constant reference to a vector entry is required, getVector can be
  /// used to retrieve a mutable entry. We currently directly modify the mapped
  /// values during "fix-up" operations that occur once the first phase of
  /// widening is complete. These operations include type truncation and the
  /// second phase of recurrence widening.
  ///
  /// Otherwise, entries from either map should be accessed using the
  /// getVectorValue or getScalarValue functions from InnerLoopVectorizer.
  /// getVectorValue and getScalarValue coordinate to generate a vector or
  /// scalar value on-demand if one is not yet available. When vectorizing a
  /// loop, we visit the definition of an instruction before its uses. When
  /// visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
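  ///
  /// For example (illustrative): with UF == 2 and VF == 4, a vectorized value
  /// is stored as two <4 x Ty> parts, while a scalarized value is stored as a
  /// 2 x 4 array of scalar values.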
  struct ValueMap {
    /// Construct an empty map with the given unroll and vectorization factors.
    ValueMap(unsigned UnrollFactor, unsigned VecWidth)
        : UF(UnrollFactor), VF(VecWidth) {
      // The unroll and vectorization factors are only used in asserts builds
      // to verify map entries are sized appropriately.
      (void)UF;
      (void)VF;
    }

    /// \return True if the map has a vector entry for \p Key.
    bool hasVector(Value *Key) const { return VectorMapStorage.count(Key); }

    /// \return True if the map has a scalar entry for \p Key.
    bool hasScalar(Value *Key) const { return ScalarMapStorage.count(Key); }

    /// \brief Map \p Key to the given VectorParts \p Entry, and return a
    /// constant reference to the new vector map entry. The given key should
    /// not already be in the map, and the given VectorParts should be
    /// correctly sized for the current unroll factor.
    const VectorParts &initVector(Value *Key, const VectorParts &Entry) {
      assert(!hasVector(Key) && "Vector entry already initialized");
      assert(Entry.size() == UF && "VectorParts has wrong dimensions");
      VectorMapStorage[Key] = Entry;
      return VectorMapStorage[Key];
    }

    /// \brief Map \p Key to the given ScalarParts \p Entry, and return a
    /// constant reference to the new scalar map entry. The given key should
    /// not already be in the map, and the given ScalarParts should be
    /// correctly sized for the current unroll and vectorization factors.
    const ScalarParts &initScalar(Value *Key, const ScalarParts &Entry) {
      assert(!hasScalar(Key) && "Scalar entry already initialized");
      assert(Entry.size() == UF &&
             all_of(make_range(Entry.begin(), Entry.end()),
                    [&](const SmallVectorImpl<Value *> &Values) -> bool {
                      return Values.size() == VF;
                    }) &&
             "ScalarParts has wrong dimensions");
      ScalarMapStorage[Key] = Entry;
      return ScalarMapStorage[Key];
    }

    /// \return A reference to the vector map entry corresponding to \p Key.
    /// The key should already be in the map. This function should only be used
    /// when it's necessary to update values that have already been vectorized.
    /// This is the case for "fix-up" operations including type truncation and
    /// the second phase of recurrence vectorization. If a non-const reference
    /// isn't required, getVectorValue should be used instead.
    VectorParts &getVector(Value *Key) {
      assert(hasVector(Key) && "Vector entry not initialized");
      return VectorMapStorage.find(Key)->second;
    }

    /// Retrieve an entry from the vector or scalar maps. The preferred way to
    /// access an existing mapped entry is with getVectorValue or
    /// getScalarValue from InnerLoopVectorizer. Until those functions can be
    /// moved inside ValueMap, we have to declare them as friends.
    friend const VectorParts &InnerLoopVectorizer::getVectorValue(Value *V);
    friend Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part,
                                                      unsigned Lane);

  private:
    /// The unroll factor. Each entry in the vector map contains UF vector
    /// values.
    unsigned UF;

    /// The vectorization factor. Each entry in the scalar map contains UF x VF
    /// scalar values.
    unsigned VF;

    /// The vector and scalar map storage. We use std::map and not DenseMap
    /// because insertions to DenseMap invalidate its iterators.
    std::map<Value *, VectorParts> VectorMapStorage;
    std::map<Value *, ScalarParts> ScalarMapStorage;
  };
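
  // A hedged usage sketch (names illustrative): after widening an instruction
  // I with UF == 2, the two vector parts are recorded in a single entry:
  //
  //   VectorParts Entry(UF);
  //   Entry[0] = Part0;
  //   Entry[1] = Part1;
  //   VectorLoopValueMap.initVector(I, Entry);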

  /// The original loop.
  Loop *OrigLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;
  /// Loop Info.
  LoopInfo *LI;
  /// Dominator Tree.
  DominatorTree *DT;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Assumption Cache.
  AssumptionCache *AC;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// \brief LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  unsigned VF;

protected:
  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;
  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;
  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;
  /// The ExitBlock of the scalar loop.
  BasicBlock *LoopExitBlock;
  /// The vector loop body.
  BasicBlock *LoopVectorBody;
  /// The scalar loop body.
  BasicBlock *LoopScalarBody;
  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction;
  /// The induction variable of the old basic block.
  PHINode *OldInduction;

  /// Maps values from the original loop to their corresponding values in the
  /// vectorized loop. A key value can map to either vector values, scalar
  /// values or both kinds of values, depending on whether the key was
  /// vectorized and scalarized.
  ValueMap VectorLoopValueMap;

  /// Store instructions that should be predicated, as a pair
  /// <StoreInst, Predicate>.
  SmallVector<std::pair<Instruction *, Value *>, 4> PredicatedInstructions;
  EdgeMaskCache MaskCache;
  /// Trip count of the original loop.
  Value *TripCount;
  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount;
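  // For example (illustrative): with TripCount == 21, VF == 4 and UF == 2,
  // VectorTripCount == 21 - 21 % 8 == 16, and the remaining 5 iterations run
  // in the scalar epilogue loop.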

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks;

  // Holds instructions from the original loop whose counterparts in the
  // vectorized loop would be trivially dead if generated. For example,
  // original induction update instructions can become dead because we
  // separately emit induction "steps" when generating code for the new loop.
  // Similarly, we create a new latch condition when setting up the structure
  // of the new loop, so the old one can become dead.
  SmallPtrSet<Instruction *, 4> DeadInstructions;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
                            UnrollFactor, LVL, CM) {}

private:
  void scalarizeInstruction(Instruction *Instr,
                            bool IfPredicateInstr = false) override;
  void vectorizeMemoryInstruction(Instruction *Instr) override;
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// \brief Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
    if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

/// \brief Set the debug location in the builder using the debug location in
/// the instruction.
static void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr))
    B.SetCurrentDebugLocation(Inst->getDebugLoc());
  else
    B.SetCurrentDebugLocation(DebugLoc());
}

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To, Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

/// \brief The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
class InterleaveGroup {
public:
  InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
      : Align(Align), SmallestKey(0), LargestKey(0), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  unsigned getFactor() const { return Factor; }
  unsigned getAlignment() const { return Align; }
  unsigned getNumMembers() const { return Members.size(); }

  /// \brief Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is related to the leader and it could be
  /// negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    int Key = Index + SmallestKey;

    // Skip if there is already a member with the same index.
    if (Members.count(Key))
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // The distance between the largest and the smallest key is always less
      // than the interleave factor.
      if (LargestKey - Key >= static_cast<int>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// \brief Get the member with the given index \p Index.
  ///
  /// \returns nullptr if the group contains no such member.
  Instruction *getMember(unsigned Index) const {
    int Key = SmallestKey + Index;
    if (!Members.count(Key))
      return nullptr;

    return Members.find(Key)->second;
  }

  /// \brief Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  unsigned getIndex(Instruction *Instr) const {
    for (auto I : Members)
      if (I.second == Instr)
        return I.first - SmallestKey;

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  Instruction *getInsertPos() const { return InsertPos; }
  void setInsertPos(Instruction *Inst) { InsertPos = Inst; }
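
  // A worked example (illustrative): if the group is created from the access
  // A[i+1], inserting A[i] uses Index == -1, so the member keys become
  // {-1, 0}; getIndex then returns 0 for A[i] and 1 for A[i+1].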

private:
  unsigned Factor; // Interleave Factor.
  bool Reverse;
  unsigned Align;
  DenseMap<int, Instruction *> Members;
  int SmallestKey;
  int LargestKey;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32             // Insert Position
  //      %add = add i32 %even         // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32               // Def of %odd
  //      store i32 %odd               // Insert Position
  Instruction *InsertPos;
};

/// \brief Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise the analysis is meaningless, since vectorizing
/// interleaved accesses would be unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the members and their groups in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(nullptr),
        RequiresScalarEpilogue(false) {}

  ~InterleavedAccessInfo() {
    SmallSet<InterleaveGroup *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// \brief Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  void analyzeInterleaving(const ValueToValueMap &Strides);

  /// \brief Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.count(Instr);
  }

  /// \brief Return the maximum interleave factor of all interleaved groups.
  unsigned getMaxInterleaveFactor() const {
    unsigned MaxFactor = 1;
    for (auto &Entry : InterleaveGroupMap)
      MaxFactor = std::max(MaxFactor, Entry.second->getFactor());
    return MaxFactor;
  }

  /// \brief Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr doesn't belong to any group.
  InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }

  /// \brief Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

  /// \brief Initialize the LoopAccessInfo used for dependence checking.
  void setLAI(const LoopAccessInfo *Info) { LAI = Info; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;
  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

  /// Holds dependences among the memory accesses in the loop. It maps a
  /// source access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// \brief The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    StrideDescriptor() = default;

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;
    const SCEV *Scev = nullptr; // The scalar expression of this access.
    uint64_t Size = 0;          // The size of the memory object.
    unsigned Align = 0;         // The alignment of this access.
  };

  /// \brief A type for holding instructions and their stride descriptors.
  typedef std::pair<Instruction *, StrideDescriptor> StrideEntry;

  /// \brief Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
                                         unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
    return InterleaveGroupMap[Instr];
  }

  /// \brief Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    delete Group;
  }

  /// \brief Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);

  /// \brief Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride) {
    unsigned Factor = std::abs(Stride);
    return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
  }

  /// \brief Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// \brief Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }

  /// \brief Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return false if reordering is
  /// not necessary or is prevented because \p A and \p B may be dependent.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {
    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B.
    // Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
  }

  /// \brief Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for them.
class LoopVectorizeHints {
  enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val) {
      switch (Kind) {
      case HK_WIDTH:
        return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
      case HK_UNROLL:
        return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
      case HK_FORCE:
        return (Val <= 1);
      }
      return false;
    }
  };

  /// Vectorization width.
  Hint Width;
  /// Vectorization interleave factor.
  Hint Interleave;
  /// Vectorization forced.
  Hint Force;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };
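
  // The hints are read from (and written back as) loop metadata. An
  // illustrative encoding for a loop with a forced width of 4:
  //
  //   br i1 %cond, label %loop, label %exit, !llvm.loop !0
  //   !0 = distinct !{!0, !1}
  //   !1 = !{!"llvm.loop.vectorize.width", i32 4}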

  LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
                     OptimizationRemarkEmitter &ORE)
      : Width("vectorize.width", VectorizerParams::VectorizationFactor,
              HK_WIDTH),
        Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
        Force("vectorize.enable", FK_Undefined, HK_FORCE),
        PotentiallyUnsafe(false), TheLoop(L), ORE(ORE) {
    // Populate values with existing loop metadata.
    getHintsFromMetadata();

    // force-vector-interleave overrides DisableInterleaving.
    if (VectorizerParams::isInterleaveForced())
      Interleave.Value = VectorizerParams::VectorizationInterleave;

    DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
          << "LV: Interleaving disabled by the pass manager\n");
  }

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized() {
    Width.Value = Interleave.Value = 1;
    Hint Hints[] = {Width, Interleave};
    writeHintsToMetadata(Hints);
  }

  bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
    if (getForce() == LoopVectorizeHints::FK_Disabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
      DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
      emitRemarkWithHints();
      return false;
    }

    if (getWidth() == 1 && getInterleave() == 1) {
      // FIXME: Add a separate metadata to indicate when the loop has already
      // been vectorized instead of setting width and count to 1.
      DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
      // FIXME: Add interleave.disable metadata. This will allow
      // vectorize.disable to be used without disabling the pass and errors
      // to differentiate between disabled vectorization and a width of 1.
      ORE.emit(OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
                                          "AllDisabled", L->getStartLoc(),
                                          L->getHeader())
               << "loop not vectorized: vectorization and interleaving are "
                  "explicitly disabled, or vectorize width and interleave "
                  "count are both set to 1");
      return false;
    }

    return true;
  }

  /// Dumps all the hint information.
  void emitRemarkWithHints() const {
    using namespace ore;
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      ORE.emit(OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
                                        TheLoop->getStartLoc(),
                                        TheLoop->getHeader())
               << "loop not vectorized: vectorization is explicitly disabled");
    else {
      OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
                                 TheLoop->getStartLoc(), TheLoop->getHeader());
      R << "loop not vectorized";
      if (Force.Value == LoopVectorizeHints::FK_Enabled) {
        R << " (Force=" << NV("Force", true);
        if (Width.Value != 0)
          R << ", Vector Width=" << NV("VectorWidth", Width.Value);
        if (Interleave.Value != 0)
          R << ", Interleave Count=" << NV("InterleaveCount", Interleave.Value);
        R << ")";
      }
      ORE.emit(R);
    }
  }

  unsigned getWidth() const { return Width.Value; }
  unsigned getInterleave() const { return Interleave.Value; }
  enum ForceKind getForce() const { return (ForceKind)Force.Value; }

  /// \brief If hints are provided that force vectorization, use the AlwaysPrint
  /// pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const {
    if (getWidth() == 1)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Disabled)
      return LV_NAME;
    if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
      return LV_NAME;
    return OptimizationRemarkAnalysis::AlwaysPrint;
  }

  bool allowReordering() const {
    // When loop hints that enable vectorization are provided, we allow the
    // vectorizer to change the order of operations that is given by the
    // scalar loop. This is not enabled by default because it can be unsafe or
    // inefficient. For example, reordering floating-point operations will
    // change the way round-off error accumulates in the loop.
    return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
  }

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata() {
    MDNode *LoopID = TheLoop->getLoopID();
    if (!LoopID)
      return;

    // First operand should refer to the loop id itself.
    assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
    assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      const MDString *S = nullptr;
      SmallVector<Metadata *, 4> Args;

      // The expected hint is either a MDString or a MDNode with the first
      // operand a MDString.
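      // For example (illustrative), the code below distinguishes:
      //   !{!"llvm.loop.vectorize.width", i32 4}  ; MDNode form, carries a
      //                                           ; value for setHint
      //   !"some.hint.string"                     ; bare MDString, no arguments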
      if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
        if (!MD || MD->getNumOperands() == 0)
          continue;
        S = dyn_cast<MDString>(MD->getOperand(0));
        for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
          Args.push_back(MD->getOperand(i));
      } else {
        S = dyn_cast<MDString>(LoopID->getOperand(i));
        assert(Args.size() == 0 && "too many arguments for MDString");
      }

      if (!S)
        continue;

      // Check if the hint starts with the loop metadata prefix.
      StringRef Name = S->getString();
      if (Args.size() == 1)
        setHint(Name, Args[0]);
    }
  }

  /// Checks a string hint with one operand and sets the value if valid.
  void setHint(StringRef Name, Metadata *Arg) {
    if (!Name.startswith(Prefix()))
      return;
    Name = Name.substr(Prefix().size(), StringRef::npos);

    const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
    if (!C)
      return;
    unsigned Val = C->getZExtValue();

    Hint *Hints[] = {&Width, &Interleave, &Force};
    for (auto H : Hints) {
      if (Name == H->Name) {
        if (H->validate(Val))
          H->Value = Val;
        else
          DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
        break;
      }
    }
  }

  /// Create a new hint from a name / value pair.
  MDNode *createHintMetadata(StringRef Name, unsigned V) const {
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    Metadata *MDs[] = {MDString::get(Context, Name),
                       ConstantAsMetadata::get(
                           ConstantInt::get(Type::getInt32Ty(Context), V))};
    return MDNode::get(Context, MDs);
  }

  /// Matches metadata with the hint name.
  bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
    MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
    if (!Name)
      return false;

    for (auto H : HintTypes)
      if (Name->getString().endswith(H.Name))
        return true;
    return false;
  }

  /// Sets the current hints into loop metadata, keeping other values intact.
  void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
    if (HintTypes.size() == 0)
      return;

    // Reserve the first element for LoopID (see below).
    SmallVector<Metadata *, 4> MDs(1);
    // If the loop already has metadata, then ignore the existing operands.
    MDNode *LoopID = TheLoop->getLoopID();
    if (LoopID) {
      for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
        MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
        // If the node is in the update list, ignore the old value.
        if (!matchesHintMetadataName(Node, HintTypes))
          MDs.push_back(Node);
      }
    }

    // Now, add the missing hints.
    for (auto H : HintTypes)
      MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));

    // Replace the current metadata node with the new one.
    LLVMContext &Context = TheLoop->getHeader()->getContext();
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);

    TheLoop->setLoopID(NewLoopID);
  }

  /// The loop these hints belong to.
  const Loop *TheLoop;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter &ORE;
};

static void emitAnalysisDiag(const Loop *TheLoop,
                             const LoopVectorizeHints &Hints,
                             OptimizationRemarkEmitter &ORE,
                             const LoopAccessReport &Message) {
  const char *Name = Hints.vectorizeAnalysisPassName();
  LoopAccessReport::emitAnalysis(Message, TheLoop, Name, ORE);
}

static void emitMissedWarning(Function *F, Loop *L,
                              const LoopVectorizeHints &LH,
                              OptimizationRemarkEmitter *ORE) {
  LH.emitRemarkWithHints();

  if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
    if (LH.getWidth() != 1)
      emitLoopVectorizeWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop vectorization");
    else if (LH.getInterleave() != 1)
      emitLoopInterleaveWarning(
          F->getContext(), *F, L->getStartLoc(),
          "failed explicitly specified loop interleaving");
  }
}

/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
/// to what vectorization factor.
/// This class does not look at the profitability of vectorization, only the
/// legality. This class has two main kinds of checks:
/// * Memory checks - The code in canVectorizeMemory checks if vectorization
///   will change the order of memory accesses in a way that will change the
///   correctness of the program.
/// * Scalar checks - The code in canVectorizeInstrs and canVectorizeMemory
///   checks for a number of different conditions, such as the availability of
///   a single induction variable, that all types are supported and
///   vectorizable, etc. This code reflects the capabilities of
///   InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction variable and the different reduction variables.
class LoopVectorizationLegality {
public:
  LoopVectorizationLegality(
      Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
      TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F,
      const TargetTransformInfo *TTI,
      std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI,
      OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R,
      LoopVectorizeHints *H)
      : NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT),
        GetLAA(GetLAA), LAI(nullptr), ORE(ORE), InterleaveInfo(PSE, L, DT, LI),
        Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
        Requirements(R), Hints(H) {}

  /// ReductionList contains the reduction descriptors for all
  /// of the reductions that were found in the loop.
  typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;

  /// InductionList saves induction variables and maps them to the
  /// induction descriptor.
  typedef MapVector<PHINode *, InductionDescriptor> InductionList;

  /// RecurrenceSet contains the phi nodes that are recurrences other than
  /// inductions and reductions.
  typedef SmallPtrSet<const PHINode *, 8> RecurrenceSet;

  /// Returns true if it is legal to vectorize this loop.
  /// This does not mean that it is profitable to vectorize this
  /// loop, only that it is legal to do so.
  bool canVectorize();

  /// Returns the induction variable.
  PHINode *getInduction() { return Induction; }

  /// Returns the reduction variables found in the loop.
  ReductionList *getReductionVars() { return &Reductions; }

  /// Returns the induction variables found in the loop.
  InductionList *getInductionVars() { return &Inductions; }

  /// Return the first-order recurrences found in the loop.
  RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }

  /// Returns the widest induction type.
  Type *getWidestInductionType() { return WidestIndTy; }

  /// Returns True if V is an induction variable in this loop.
  bool isInductionVariable(const Value *V);

  /// Returns True if PN is a reduction variable in this loop.
  bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }

  /// Returns True if Phi is a first-order recurrence in this loop.
  bool isFirstOrderRecurrence(const PHINode *Phi);

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  bool blockNeedsPredication(BasicBlock *BB);

  /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or when the
  /// pointer itself is an induction variable.
  /// This check allows us to vectorize A[idx] into a wide load/store.
  /// Returns:
  /// 0 - Stride is unknown or non-consecutive.
  /// 1 - Address is consecutive.
  /// -1 - Address is consecutive, and decreasing.
  int isConsecutivePtr(Value *Ptr);

  /// Returns true if the value V is uniform within the loop.
  bool isUniform(Value *V);

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I) { return Uniforms.count(I); }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I) { return Scalars.count(I); }

  /// Returns the information that we collected about runtime memory checks.
  const RuntimePointerChecking *getRuntimePointerChecking() const {
    return LAI->getRuntimePointerChecking();
  }

  const LoopAccessInfo *getLAI() const { return LAI; }

  /// \brief Check if \p Instr belongs to any interleaved access group.
  bool isAccessInterleaved(Instruction *Instr) {
    return InterleaveInfo.isInterleaved(Instr);
  }

  /// \brief Return the maximum interleave factor of all interleaved groups.
  unsigned getMaxInterleaveFactor() const {
    return InterleaveInfo.getMaxInterleaveFactor();
  }

  /// \brief Get the interleaved access group that \p Instr belongs to.
  const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
    return InterleaveInfo.getInterleaveGroup(Instr);
  }

  /// \brief Returns true if an interleaved group requires a scalar iteration
  /// to handle accesses with gaps.
  bool requiresScalarEpilogue() const {
    return InterleaveInfo.requiresScalarEpilogue();
  }

  unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }

  bool hasStride(Value *V) { return LAI->hasStride(V); }

  /// Returns true if the target machine supports a masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedStore(DataType);
  }
  /// Returns true if the target machine supports a masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
    return isConsecutivePtr(Ptr) && TTI->isLegalMaskedLoad(DataType);
  }
  /// Returns true if the target machine supports a masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType) {
    return TTI->isLegalMaskedScatter(DataType);
  }
  /// Returns true if the target machine supports a masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType) {
    return TTI->isLegalMaskedGather(DataType);
  }
  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    auto *LI = dyn_cast<LoadInst>(V);
    auto *SI = dyn_cast<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ptr = getPointerOperand(V);
    auto *Ty = cast<PointerType>(Ptr->getType())->getElementType();
    return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
  }

  /// Returns true if the vector representation of the instruction \p I
  /// requires a mask.
  bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
  unsigned getNumStores() const { return LAI->getNumStores(); }
  unsigned getNumLoads() const { return LAI->getNumLoads(); }
  unsigned getNumPredStores() const { return NumPredStores; }

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  bool isScalarWithPredication(Instruction *I);

  /// Returns true if \p I is a memory instruction that has a consecutive or
  /// consecutive-like pointer operand. Consecutive-like pointers are pointers
  /// that are treated like consecutive pointers during vectorization. The
  /// pointer operands of interleaved accesses are an example.
  bool hasConsecutiveLikePtrOperand(Instruction *I);

  /// Returns true if \p I is a memory instruction that must be scalarized
  /// during vectorization.
  bool memoryInstructionMustBeScalarized(Instruction *I, unsigned VF = 1);

private:
  /// Check if a single basic block loop is vectorizable.
  /// At this point we know that this is a loop with a constant trip count
  /// and we only need to check individual instructions.
  bool canVectorizeInstrs();

  /// When we vectorize loops we may change the order in which
  /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constraints.
  /// Returns true if the loop is vectorizable.
  bool canVectorizeMemory();

  /// Return true if we can vectorize this loop using the IF-conversion
  /// transformation.
  bool canVectorizeWithIfConvert();

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses.
  /// Note that although uniformity implies an instruction will be scalar,
  /// the reverse is not true. In general, a scalarized instruction will be
  /// represented by VF scalar values in the vectorized loop, each
  /// corresponding to an iteration of the original scalar loop.
  void collectLoopUniforms();

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. Non-uniform scalarized instructions will be
  /// represented by VF values in the vectorized loop, each corresponding to
  /// an iteration of the original scalar loop.
  void collectLoopScalars();

  /// Return true if all of the instructions in the block can be speculatively
  /// executed. \p SafePtrs is a list of addresses that are known to be legal
  /// and we know that we can read from them without segfault.
  bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);

  /// Updates the vectorization state by adding \p Phi to the inductions list.
  /// This can set \p Phi as the main induction of the loop if \p Phi is a
  /// better choice for the main induction than the existing one.
  void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
                       SmallPtrSetImpl<Value *> &AllowedExit);

  /// Report an analysis message to assist the user in diagnosing loops that
  /// are not vectorized. These are handled as LoopAccessReport rather than
  /// VectorizationReport because the << operator of VectorizationReport
  /// returns LoopAccessReport.
  void emitAnalysis(const LoopAccessReport &Message) const {
    emitAnalysisDiag(TheLoop, *Hints, *ORE, Message);
  }

  /// Create an analysis remark that explains why vectorization failed
  ///
  /// \p RemarkName is the identifier for the remark. If \p I is passed it is
  /// an instruction that prevents vectorization. Otherwise the loop is used
  /// for the location of the remark. \return the remark object that can be
  /// streamed to.
  OptimizationRemarkAnalysis
  createMissedAnalysis(StringRef RemarkName, Instruction *I = nullptr) const {
    return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
                                  RemarkName, TheLoop, I);
  }

  /// \brief If an access has a symbolic stride, this maps the pointer value
  /// to the stride symbol.
  const ValueToValueMap *getSymbolicStrides() {
    // FIXME: Currently, the set of symbolic strides is sometimes queried
    // before it's collected. This happens from canVectorizeWithIfConvert,
    // when the pointer is checked to reference consecutive elements suitable
    // for a masked access.
    return LAI ? &LAI->getSymbolicStrides() : nullptr;
  }

  unsigned NumPredStores;

  /// The loop that we evaluate.
  Loop *TheLoop;
  /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
  /// Applies dynamic knowledge to simplify SCEV expressions in the context
  /// of existing SCEV assumptions. The analysis will also add a minimal set
  /// of new predicates if this is required to enable vectorization and
  /// unrolling.
  PredicatedScalarEvolution &PSE;
  /// Target Library Info.
  TargetLibraryInfo *TLI;
  /// Target Transform Info.
  const TargetTransformInfo *TTI;
  /// Dominator Tree.
  DominatorTree *DT;
  // LoopAccess analysis.
  std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
  // And the loop-accesses info corresponding to this loop. This pointer is
  // null until canVectorizeMemory sets it up.
  const LoopAccessInfo *LAI;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// The interleaved access information contains groups of interleaved
  /// accesses with the same stride that are close to each other.
  InterleavedAccessInfo InterleaveInfo;

  // --- vectorization state --- //

  /// Holds the integer induction variable. This is the counter of the
  /// loop.
  PHINode *Induction;
  /// Holds the reduction variables.
  ReductionList Reductions;
  /// Holds all of the induction variables that we found in the loop.
  /// Notice that inductions don't need to start at zero and that induction
  /// variables can be pointers.
  InductionList Inductions;
  /// Holds the phi nodes that are first-order recurrences.
  RecurrenceSet FirstOrderRecurrences;
  /// Holds the widest induction type encountered.
  Type *WidestIndTy;

  /// Allowed outside users. This holds the induction and reduction
  /// vars which can be accessed from outside the loop.
  SmallPtrSet<Value *, 4> AllowedExit;

  /// Holds the instructions known to be uniform after vectorization.
  SmallPtrSet<Instruction *, 4> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  SmallPtrSet<Instruction *, 4> Scalars;

  /// Can we assume the absence of NaNs.
  bool HasFunNoNaNAttr;

  /// Vectorization requirements that will go through late-evaluation.
  LoopVectorizationRequirements *Requirements;

  /// Used to emit an analysis of any legality issues.
  LoopVectorizeHints *Hints;

  /// While vectorizing these instructions we have to generate a
  /// call to the appropriate masked intrinsic.
  SmallPtrSet<const Instruction *, 8> MaskedOp;
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdown due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
                             LoopInfo *LI, LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints)
      : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
        AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {}

  /// Information about vectorization costs.
  struct VectorizationFactor {
    unsigned Width; // Vector width with best cost.
    unsigned Cost;  // Cost of the loop with that width.
  };
  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to VF. If UserVF is not zero,
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
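  /// For example (illustrative), with a UserVF of zero the model compares the
  /// normalized per-lane costs of VF = 1, 2, 4, ... and returns the cheapest
  /// factor, which may be 1 if widening never pays off.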
  VectorizationFactor selectVectorizationFactor(bool OptForSize);

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
                                 unsigned LoopCost);

  /// \brief A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    unsigned LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    unsigned MaxLocalUsers;
    /// Holds the number of instructions in the loop.
    unsigned NumInstructions;
  };

  /// \return Returns information about the register usage of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
    return VF > 1 && MinBWs.count(I) && !isProfitableToScalarize(I, VF) &&
           !Legal->isScalarAfterVectorization(I);
  }

private:
  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  typedef std::pair<unsigned, bool> VectorizationCostTy;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
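  /// For example (illustrative), an i32 add at VF = 4 is costed as a single
  /// <4 x i32> add, whereas an operation the target cannot widen is costed as
  /// VF scalar operations plus the cost of the extracts and inserts needed to
  /// unpack and repack the vector operands.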
  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Create an analysis remark that explains why vectorization failed
  ///
  /// \p RemarkName is the identifier for the remark. \return the remark object
  /// that can be streamed to.
  OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
    return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
                                  RemarkName, TheLoop);
  }

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be
  /// truncated to this type.
  MapVector<Instruction *, uint64_t> MinBWs;

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  typedef DenseMap<Instruction *, unsigned> ScalarCostsTy;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              unsigned VF);

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(unsigned VF);

public:
  /// The loop that we evaluate.
  Loop *TheLoop;
  /// Predicated scalar evolution analysis.
  PredicatedScalarEvolution &PSE;
  /// Loop Info analysis.
  LoopInfo *LI;
  /// Vectorization legality.
  LoopVectorizationLegality *Legal;
  /// Vector target information.
  const TargetTransformInfo &TTI;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
  /// Demanded bits analysis.
  DemandedBits *DB;
  /// Assumption cache.
  AssumptionCache *AC;
  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  const Function *TheFunction;
  /// Loop Vectorize Hint.
  const LoopVectorizeHints *Hints;
  /// Values to ignore in the cost model.
  SmallPtrSet<const Value *, 16> ValuesToIgnore;
  /// Values to ignore in the cost model when VF > 1.
  SmallPtrSet<const Value *, 16> VecValuesToIgnore;
};

/// \brief This holds vectorization requirements that must be verified late in
/// the process. The requirements are set by the legality and cost-model
/// analyses.
/// Once vectorization has been determined to be possible and profitable, the
/// requirements can be verified by looking for metadata or compiler options.
/// For example, some loops require FP commutativity, which is only allowed if
/// vectorization is explicitly specified or if the fast-math compiler option
/// has been provided.
/// Late evaluation of these requirements allows helpful diagnostics to be
/// composed that tell the user what needs to be done to vectorize the loop,
/// for example, by specifying #pragma clang loop vectorize or -ffast-math.
/// Late evaluation should be used only when diagnostics can be generated that
/// can be followed by a non-expert user.
class LoopVectorizationRequirements {
public:
  LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE)
      : NumRuntimePointerChecks(0), UnsafeAlgebraInst(nullptr), ORE(ORE) {}

  void addUnsafeAlgebraInst(Instruction *I) {
    // First unsafe algebra instruction.
    if (!UnsafeAlgebraInst)
      UnsafeAlgebraInst = I;
  }

  void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }

  bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
    const char *PassName = Hints.vectorizeAnalysisPassName();
    bool Failed = false;
    if (UnsafeAlgebraInst && !Hints.allowReordering()) {
      ORE.emit(
          OptimizationRemarkAnalysisFPCommute(PassName, "CantReorderFPOps",
                                              UnsafeAlgebraInst->getDebugLoc(),
                                              UnsafeAlgebraInst->getParent())
          << "loop not vectorized: cannot prove it is safe to reorder "
             "floating-point operations");
      Failed = true;
    }

    // Test if runtime memcheck thresholds are exceeded.
    bool PragmaThresholdReached =
        NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
    bool ThresholdReached =
        NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
    if ((ThresholdReached && !Hints.allowReordering()) ||
        PragmaThresholdReached) {
      ORE.emit(OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps",
                                                  L->getStartLoc(),
                                                  L->getHeader())
               << "loop not vectorized: cannot prove it is safe to reorder "
                  "memory operations");
      DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
      Failed = true;
    }

    return Failed;
  }

private:
  unsigned NumRuntimePointerChecks;
  Instruction *UnsafeAlgebraInst;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter &ORE;
};

static void addAcyclicInnerLoop(Loop &L, SmallVectorImpl<Loop *> &V) {
  if (L.empty()) {
    if (!hasCyclesInLoopBody(L))
      V.push_back(&L);
    return;
  }
  for (Loop *InnerL : L)
    addAcyclicInnerLoop(*InnerL, V);
}

/// The LoopVectorize Pass.
struct LoopVectorize : public FunctionPass {
  /// Pass identification, replacement for typeid.
  static char ID;

  explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true)
      : FunctionPass(ID) {
    Impl.DisableUnrolling = NoUnrolling;
    Impl.AlwaysVectorize = AlwaysVectorize;
    initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
  }

  LoopVectorizePass Impl;

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

    std::function<const LoopAccessInfo &(Loop &)> GetLAA =
        [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };

    return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
                        GetLAA, *ORE);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
  }
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
// LoopVectorizationCostModel.
//===----------------------------------------------------------------------===//

Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // We need to place the broadcast of invariant variables outside the loop.
  Instruction *Instr = dyn_cast<Instruction>(V);
  bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody);
  bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;

  // Place the code for broadcasting invariant variables in the new preheader.
  IRBuilder<>::InsertPointGuard Guard(Builder);
  if (Invariant)
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // Broadcast the scalar into all locations in the vector.
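  // For example (illustrative), broadcasting an i32 %x with VF = 4 emits:
  //   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %x, i32 0
  //   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
  //                          <4 x i32> undef, <4 x i32> zeroinitializer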
  Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

  return Shuf;
}

void InnerLoopVectorizer::createVectorIntInductionPHI(
    const InductionDescriptor &II, Instruction *EntryVal) {
  Value *Start = II.getStartValue();
  ConstantInt *Step = II.getConstIntStepValue();
  assert(Step && "Cannot widen an IV with a non-constant step");

  // Construct the initial value of the vector IV in the vector loop preheader.
  auto CurrIP = Builder.saveIP();
  Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
  if (isa<TruncInst>(EntryVal)) {
    auto *TruncType = cast<IntegerType>(EntryVal->getType());
    Step = ConstantInt::getSigned(TruncType, Step->getSExtValue());
    Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
  }
  Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
  Value *SteppedStart = getStepVector(SplatStart, 0, Step);
  Builder.restoreIP(CurrIP);

  Value *SplatVF = ConstantVector::getSplat(
      VF,
      ConstantInt::getSigned(Start->getType(), VF * Step->getSExtValue()));
  // We may need to add the step a number of times, depending on the unroll
  // factor. The last of those goes into the PHI.
  PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
                                    &*LoopVectorBody->getFirstInsertionPt());
  Instruction *LastInduction = VecInd;
  VectorParts Entry(UF);
  for (unsigned Part = 0; Part < UF; ++Part) {
    Entry[Part] = LastInduction;
    LastInduction = cast<Instruction>(
        Builder.CreateAdd(LastInduction, SplatVF, "step.add"));
  }
  VectorLoopValueMap.initVector(EntryVal, Entry);
  if (isa<TruncInst>(EntryVal))
    addMetadata(Entry, EntryVal);

  // Move the last step to the end of the latch block. This ensures consistent
  // placement of all induction updates.
  auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
  auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
  auto *ICmp = cast<Instruction>(Br->getCondition());
  LastInduction->moveBefore(ICmp);
  LastInduction->setName("vec.ind.next");

  VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
  VecInd->addIncoming(LastInduction, LoopVectorLatch);
}

bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
  return Legal->isScalarAfterVectorization(I) ||
         Cost->isProfitableToScalarize(I, VF);
}

bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
  if (shouldScalarizeInstruction(IV))
    return true;
  auto isScalarInst = [&](User *U) -> bool {
    auto *I = cast<Instruction>(U);
    return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
  };
  return any_of(IV->users(), isScalarInst);
}

void InnerLoopVectorizer::widenIntInduction(PHINode *IV, TruncInst *Trunc) {

  auto II = Legal->getInductionVars()->find(IV);
  assert(II != Legal->getInductionVars()->end() && "IV is not an induction");

  auto ID = II->second;
  assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");

  // The scalar value to broadcast. This will be derived from the canonical
  // induction variable.
  Value *ScalarIV = nullptr;

  // The step of the induction.
  Value *Step = nullptr;

  // The value from the original loop to which we are mapping the new
  // induction variable.
  Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;

  // True if we have vectorized the induction variable.
  auto VectorizedIV = false;

  // Determine if we want a scalar version of the induction variable. This is
  // true if the induction variable itself is not widened, or if it has at
  // least one user in the loop that is not widened.
  auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal);

  // If the induction variable has a constant integer step value, go ahead and
  // get it now.
  if (ID.getConstIntStepValue())
    Step = ID.getConstIntStepValue();

  // Try to create a new independent vector induction variable. If we can't
  // create the phi node, we will splat the scalar induction variable in each
  // loop iteration.
  if (VF > 1 && IV->getType() == Induction->getType() && Step &&
      !shouldScalarizeInstruction(EntryVal)) {
    createVectorIntInductionPHI(ID, EntryVal);
    VectorizedIV = true;
  }

  // If we haven't yet vectorized the induction variable, or if we will create
  // a scalar one, we need to define the scalar induction variable and step
  // values. If we were given a truncation type, truncate the canonical
  // induction variable and constant step. Otherwise, derive these values from
  // the induction descriptor.
  if (!VectorizedIV || NeedsScalarIV) {
    if (Trunc) {
      auto *TruncType = cast<IntegerType>(Trunc->getType());
      assert(Step && "Truncation requires constant integer step");
      auto StepInt = cast<ConstantInt>(Step)->getSExtValue();
      ScalarIV = Builder.CreateCast(Instruction::Trunc, Induction, TruncType);
      Step = ConstantInt::getSigned(TruncType, StepInt);
    } else {
      ScalarIV = Induction;
      auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
      if (IV != OldInduction) {
        ScalarIV = Builder.CreateSExtOrTrunc(ScalarIV, IV->getType());
        ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL);
        ScalarIV->setName("offset.idx");
      }
      if (!Step) {
        SCEVExpander Exp(*PSE.getSE(), DL, "induction");
        Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(),
                                 &*Builder.GetInsertPoint());
      }
    }
  }

  // If we haven't yet vectorized the induction variable, splat the scalar
  // induction variable, and build the necessary step vectors.
  if (!VectorizedIV) {
    Value *Broadcasted = getBroadcastInstrs(ScalarIV);
    VectorParts Entry(UF);
    for (unsigned Part = 0; Part < UF; ++Part)
      Entry[Part] = getStepVector(Broadcasted, VF * Part, Step);
    VectorLoopValueMap.initVector(EntryVal, Entry);
    if (Trunc)
      addMetadata(Entry, Trunc);
  }

  // If an induction variable is only used for counting loop iterations or
  // calculating addresses, it doesn't need to be widened. Create scalar steps
  // that can be used by instructions we will later scalarize. Note that the
  // addition of the scalar steps will not increase the number of instructions
  // in the loop in the common case prior to InstCombine. We will be trading
  // one vector extract for each scalar step.
  if (NeedsScalarIV)
    buildScalarSteps(ScalarIV, Step, EntryVal);
}

Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
                                          Instruction::BinaryOps BinOp) {
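  // For example (illustrative), for an integer induction with VF = 4,
  // StartIdx = 0 and Step = 2, this returns Val + <0, 2, 4, 6>.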
  // Create and check the types.
  assert(Val->getType()->isVectorTy() && "Must be a vector");
  int VLen = Val->getType()->getVectorNumElements();

  Type *STy = Val->getType()->getScalarType();
  assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
         "Induction Step must be an integer or FP");
  assert(Step->getType() == STy && "Step has wrong type");

  SmallVector<Constant *, 8> Indices;

  if (STy->isIntegerTy()) {
    // Create a vector of consecutive numbers, starting at StartIdx.
    for (int i = 0; i < VLen; ++i)
      Indices.push_back(ConstantInt::get(STy, StartIdx + i));

    // Add the consecutive indices to the vector value.
    Constant *Cv = ConstantVector::get(Indices);
    assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
    Step = Builder.CreateVectorSplat(VLen, Step);
    assert(Step->getType() == Val->getType() && "Invalid step vec");
    // FIXME: The newly created binary instructions should contain nsw/nuw
    // flags, which can be found from the original scalar operations.
    Step = Builder.CreateMul(Cv, Step);
    return Builder.CreateAdd(Val, Step, "induction");
  }

  // Floating-point induction.
  assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
         "Binary Opcode should be specified for FP induction");
  // Create a vector of consecutive numbers, starting at StartIdx.
  for (int i = 0; i < VLen; ++i)
    Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));

  // Add the consecutive indices to the vector value.
  Constant *Cv = ConstantVector::get(Indices);

  Step = Builder.CreateVectorSplat(VLen, Step);

  // Floating-point operations had to be 'fast' to enable the induction.
  FastMathFlags Flags;
  Flags.setUnsafeAlgebra();

  Value *MulOp = Builder.CreateFMul(Cv, Step);
  if (isa<Instruction>(MulOp))
    // We have to check: MulOp may be a constant.
    cast<Instruction>(MulOp)->setFastMathFlags(Flags);

  Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
  if (isa<Instruction>(BOp))
    cast<Instruction>(BOp)->setFastMathFlags(Flags);
  return BOp;
}

void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
                                           Value *EntryVal) {

  // We shouldn't have to build scalar steps if we aren't vectorizing.
  assert(VF > 1 && "VF should be greater than one");

  // Get the value type and ensure it and the step have the same integer type.
  Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
  assert(ScalarIVTy->isIntegerTy() && ScalarIVTy == Step->getType() &&
         "Val and Step should have the same integer type");

  // Determine the number of scalars we need to generate for each unroll
  // iteration. If EntryVal is uniform, we only need to generate the first
  // lane. Otherwise, we generate all VF values.
  unsigned Lanes =
      Legal->isUniformAfterVectorization(cast<Instruction>(EntryVal)) ? 1 : VF;

  // Compute the scalar steps and save the results in VectorLoopValueMap.
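  // For example (illustrative), with UF = 2, VF = 4 and a non-uniform
  // EntryVal, this emits eight scalar adds, ScalarIV + (0..7) * Step, one per
  // unroll part and lane.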
  ScalarParts Entry(UF);
  for (unsigned Part = 0; Part < UF; ++Part) {
    Entry[Part].resize(VF);
    for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
      auto *StartIdx = ConstantInt::get(ScalarIVTy, VF * Part + Lane);
      auto *Mul = Builder.CreateMul(StartIdx, Step);
      auto *Add = Builder.CreateAdd(ScalarIV, Mul);
      Entry[Part][Lane] = Add;
    }
  }
  VectorLoopValueMap.initScalar(EntryVal, Entry);
}

int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {

  const ValueToValueMap &Strides =
      getSymbolicStrides() ? *getSymbolicStrides() : ValueToValueMap();

  int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, true, false);
  if (Stride == 1 || Stride == -1)
    return Stride;
  return 0;
}

bool LoopVectorizationLegality::isUniform(Value *V) {
  return LAI->isUniform(V);
}

const InnerLoopVectorizer::VectorParts &
InnerLoopVectorizer::getVectorValue(Value *V) {
  assert(V != Induction && "The new induction variable should not be used.");
  assert(!V->getType()->isVectorTy() && "Can't widen a vector");
  assert(!V->getType()->isVoidTy() && "Type does not produce a value");

  // If we have a stride that is replaced by one, do the replacement here.
  if (Legal->hasStride(V))
    V = ConstantInt::get(V->getType(), 1);

  // If we have this scalar in the map, return it.
  if (VectorLoopValueMap.hasVector(V))
    return VectorLoopValueMap.VectorMapStorage[V];

  // If the value has not been vectorized, check if it has been scalarized
  // instead. If it has been scalarized, and we actually need the value in
  // vector form, we will construct the vector values on demand.
  if (VectorLoopValueMap.hasScalar(V)) {

    // Initialize a new vector map entry.
    VectorParts Entry(UF);

    // If we've scalarized a value, that value should be an instruction.
    auto *I = cast<Instruction>(V);

    // If we aren't vectorizing, we can just copy the scalar map values over
    // to the vector map.
    if (VF == 1) {
      for (unsigned Part = 0; Part < UF; ++Part)
        Entry[Part] = getScalarValue(V, Part, 0);
      return VectorLoopValueMap.initVector(V, Entry);
    }

    // Get the last scalar instruction we generated for V. If the value is
    // known to be uniform after vectorization, this corresponds to lane zero
    // of the last unroll iteration. Otherwise, the last instruction is the
    // one we created for the last vector lane of the last unroll iteration.
    unsigned LastLane = Legal->isUniformAfterVectorization(I) ? 0 : VF - 1;
    auto *LastInst = cast<Instruction>(getScalarValue(V, UF - 1, LastLane));

    // Set the insert point after the last scalarized instruction. This
    // ensures the insertelement sequence will directly follow the scalar
    // definitions.
    auto OldIP = Builder.saveIP();
    auto NewIP = std::next(BasicBlock::iterator(LastInst));
    Builder.SetInsertPoint(&*NewIP);

    // However, if we are vectorizing, we need to construct the vector values.
    // If the value is known to be uniform after vectorization, we can just
    // broadcast the scalar value corresponding to lane zero for each unroll
    // iteration. Otherwise, we construct the vector values using
    // insertelement instructions. Since the resulting vectors are stored in
    // VectorLoopValueMap, we will only generate the insertelements once.
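    // For example (illustrative), with VF = 4, a scalarized i32 value %s is
    // rebuilt per unroll part as:
    //   %v.0 = insertelement <4 x i32> undef, i32 %s.0, i32 0
    //   ...
    //   %v.3 = insertelement <4 x i32> %v.2, i32 %s.3, i32 3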
    for (unsigned Part = 0; Part < UF; ++Part) {
      Value *VectorValue = nullptr;
      if (Legal->isUniformAfterVectorization(I)) {
        VectorValue = getBroadcastInstrs(getScalarValue(V, Part, 0));
      } else {
        VectorValue = UndefValue::get(VectorType::get(V->getType(), VF));
        for (unsigned Lane = 0; Lane < VF; ++Lane)
          VectorValue = Builder.CreateInsertElement(
              VectorValue, getScalarValue(V, Part, Lane),
              Builder.getInt32(Lane));
      }
      Entry[Part] = VectorValue;
    }
    Builder.restoreIP(OldIP);
    return VectorLoopValueMap.initVector(V, Entry);
  }

  // If this scalar is unknown, assume that it is a constant or that it is
  // loop invariant. Broadcast V and save the value for future uses.
  Value *B = getBroadcastInstrs(V);
  return VectorLoopValueMap.initVector(V, VectorParts(UF, B));
}

Value *InnerLoopVectorizer::getScalarValue(Value *V, unsigned Part,
                                           unsigned Lane) {

  // If the value is not an instruction contained in the loop, it should
  // already be scalar.
  if (OrigLoop->isLoopInvariant(V))
    return V;

  assert((Lane == 0 ||
          !Legal->isUniformAfterVectorization(cast<Instruction>(V))) &&
         "Uniform values only have lane zero");

  // If the value from the original loop has not been vectorized, it is
  // represented by UF x VF scalar values in the new loop. Return the
  // requested scalar value.
  if (VectorLoopValueMap.hasScalar(V))
    return VectorLoopValueMap.ScalarMapStorage[V][Part][Lane];

  // If the value has not been scalarized, get its entry in VectorLoopValueMap
  // for the given unroll part. If this entry is not a vector type (i.e., the
  // vectorization factor is one), there is no need to generate an
  // extractelement instruction.
  auto *U = getVectorValue(V)[Part];
  if (!U->getType()->isVectorTy()) {
    assert(VF == 1 && "Value not scalarized has non-vector type");
    return U;
  }

  // Otherwise, the value from the original loop has been vectorized and is
  // represented by UF vector values. Extract and return the requested scalar
  // value from the appropriate vector lane.
  return Builder.CreateExtractElement(U, Builder.getInt32(Lane));
}

Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
  assert(Vec->getType()->isVectorTy() && "Invalid type");
  SmallVector<Constant *, 8> ShuffleMask;
  for (unsigned i = 0; i < VF; ++i)
    ShuffleMask.push_back(Builder.getInt32(VF - i - 1));

  return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
                                     ConstantVector::get(ShuffleMask),
                                     "reverse");
}

// Get a mask to interleave \p NumVec vectors into a wide vector.
// I.e. <0, VF, VF*2, ..., VF*(NumVec-1), 1, VF+1, VF*2+1, ...>
// E.g. for 2 interleaved vectors, if VF is 4, the mask is:
//   <0, 4, 1, 5, 2, 6, 3, 7>
static Constant *getInterleavedMask(IRBuilder<> &Builder, unsigned VF,
                                    unsigned NumVec) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVec; j++)
      Mask.push_back(Builder.getInt32(j * VF + i));

  return ConstantVector::get(Mask);
}

// Get the strided mask starting from index \p Start.
// I.e. <Start, Start + Stride, ..., Start + Stride*(VF-1)>
static Constant *getStridedMask(IRBuilder<> &Builder, unsigned Start,
                                unsigned Stride, unsigned VF) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Builder.getInt32(Start + i * Stride));

  return ConstantVector::get(Mask);
}

// Get a mask of two parts: the first part consists of sequential integers
// starting from 0, and the second part consists of UNDEFs.
// I.e. <0, 1, 2, ..., NumInt - 1, undef, ..., undef>
static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned NumInt,
                                   unsigned NumUndef) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < NumInt; i++)
    Mask.push_back(Builder.getInt32(i));

  Constant *Undef = UndefValue::get(Builder.getInt32Ty());
  for (unsigned i = 0; i < NumUndef; i++)
    Mask.push_back(Undef);

  return ConstantVector::get(Mask);
}

// Concatenate two vectors with the same element type. The 2nd vector should
// not have more elements than the 1st vector. If the 2nd vector has fewer
// elements, extend it with UNDEFs.
static Value *ConcatenateTwoVectors(IRBuilder<> &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = VecTy1->getNumElements();
  unsigned NumElts2 = VecTy2->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "The first vector should have at least as many elements as the second");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    Constant *ExtMask =
        getSequentialMask(Builder, NumElts2, NumElts1 - NumElts2);
    V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask);
  }

  Constant *Mask = getSequentialMask(Builder, NumElts1 + NumElts2, 0);
  return Builder.CreateShuffleVector(V1, V2, Mask);
}

// Concatenate vectors in the given list. All vectors have the same type.
static Value *ConcatenateVectors(IRBuilder<> &Builder,
                                 ArrayRef<Value *> InputList) {
  unsigned NumVec = InputList.size();
  assert(NumVec > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(InputList.begin(), InputList.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVec - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVec - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(ConcatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVec % 2 != 0)
      TmpList.push_back(ResList[NumVec - 1]);

    ResList = TmpList;
    NumVec = ResList.size();
  } while (NumVec > 1);

  return ResList[0];
}

// Try to vectorize the interleave group that \p Instr belongs to.
//
// E.g. translate the following interleaved load group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     R = Pic[i];   // Member of index 0
//     G = Pic[i+1]; // Member of index 1
//     B = Pic[i+2]; // Member of index 2
//     ... do something to R, G, B
//   }
// To:
//   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
//   %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9>   ; R elements
//   %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10>  ; G elements
//   %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11>  ; B elements
//
// Or translate the following interleaved store group (factor = 3):
//   for (i = 0; i < N; i+=3) {
//     ... do something to R, G, B
//     Pic[i]   = R; // Member of index 0
//     Pic[i+1] = G; // Member of index 1
//     Pic[i+2] = B; // Member of index 2
//   }
// To:
//   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
//   %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
//   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
//       <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>        ; Interleave R,G,B elements
//   store <12 x i32> %interleaved.vec                 ; Write 4 tuples of R,G,B
void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
  const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
  assert(Group && "Failed to get an interleaved access group.");

  // Skip if the current instruction is not the insert position.
  if (Instr != Group->getInsertPos())
    return;

  LoadInst *LI = dyn_cast<LoadInst>(Instr);
  StoreInst *SI = dyn_cast<StoreInst>(Instr);
  Value *Ptr = getPointerOperand(Instr);

  // Prepare for the vector type of the interleaved load/store.
  Type *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  unsigned InterleaveFactor = Group->getFactor();
  Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
  Type *PtrTy = VecTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());

  // Prepare for the new pointers.
  setDebugLocFromInst(Builder, Ptr);
  SmallVector<Value *, 2> NewPtrs;
  unsigned Index = Group->getIndex(Instr);

  // If the group is reverse, adjust the index to refer to the last vector
  // lane instead of the first. We adjust the index from the first vector
  // lane, rather than directly getting the pointer for lane VF - 1, because
  // the pointer operand of the interleaved access is supposed to be uniform.
  // For uniform instructions, we're only required to generate a value for the
  // first vector lane in each unroll iteration.
  if (Group->isReverse())
    Index += (VF - 1) * Group->getFactor();

  for (unsigned Part = 0; Part < UF; Part++) {
    Value *NewPtr = getScalarValue(Ptr, Part, 0);

    // Note that the current instruction could have any index in the group.
    // We need to adjust the address to that of the member of index 0.
    //
    // E.g. a = A[i+1]; // Member of index 1 (current instruction)
    //      b = A[i];   // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g. A[i+1] = a; // Member of index 1
    //      A[i]   = b; // Member of index 0
    //      A[i+2] = c; // Member of index 2 (current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].
    NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));

    // Cast to the vector pointer type.
    NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
  }

  setDebugLocFromInst(Builder, Instr);
  Value *UndefVec = UndefValue::get(VecTy);

  // Vectorize the interleaved load group.
  if (LI) {

    // For each unroll part, create a wide load for the group.
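    // For example (illustrative), with VF = 4 and a factor-3 group of i32
    // accesses, each unroll part loads one <12 x i32> wide vector covering
    // four R,G,B tuples of the example above.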
2736     SmallVector<Value *, 2> NewLoads;
2737     for (unsigned Part = 0; Part < UF; Part++) {
2738       auto *NewLoad = Builder.CreateAlignedLoad(
2739           NewPtrs[Part], Group->getAlignment(), "wide.vec");
2740       addMetadata(NewLoad, Instr);
2741       NewLoads.push_back(NewLoad);
2742     }
2743
2744     // For each member in the group, shuffle out the appropriate data from the
2745     // wide loads.
2746     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2747       Instruction *Member = Group->getMember(I);
2748
2749       // Skip the gaps in the group.
2750       if (!Member)
2751         continue;
2752
2753       VectorParts Entry(UF);
2754       Constant *StrideMask = getStridedMask(Builder, I, InterleaveFactor, VF);
2755       for (unsigned Part = 0; Part < UF; Part++) {
2756         Value *StridedVec = Builder.CreateShuffleVector(
2757             NewLoads[Part], UndefVec, StrideMask, "strided.vec");
2758
2759         // If this member has a different type, cast the result to it.
2760         if (Member->getType() != ScalarTy) {
2761           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2762           StridedVec = Builder.CreateBitOrPointerCast(StridedVec, OtherVTy);
2763         }
2764
2765         Entry[Part] =
2766             Group->isReverse() ? reverseVector(StridedVec) : StridedVec;
2767       }
2768       VectorLoopValueMap.initVector(Member, Entry);
2769     }
2770     return;
2771   }
2772
2773   // The sub vector type for the current instruction.
2774   VectorType *SubVT = VectorType::get(ScalarTy, VF);
2775
2776   // Vectorize the interleaved store group.
2777   for (unsigned Part = 0; Part < UF; Part++) {
2778     // Collect the stored vector from each member.
2779     SmallVector<Value *, 4> StoredVecs;
2780     for (unsigned i = 0; i < InterleaveFactor; i++) {
2781       // An interleaved store group doesn't allow gaps; each index has a member.
2782       Instruction *Member = Group->getMember(i);
2783       assert(Member && "Failed to get a member from an interleaved store group");
2784
2785       Value *StoredVec =
2786           getVectorValue(cast<StoreInst>(Member)->getValueOperand())[Part];
2787       if (Group->isReverse())
2788         StoredVec = reverseVector(StoredVec);
2789
2790       // If this member has a different type, cast it to a unified type.
2791       if (StoredVec->getType() != SubVT)
2792         StoredVec = Builder.CreateBitOrPointerCast(StoredVec, SubVT);
2793
2794       StoredVecs.push_back(StoredVec);
2795     }
2796
2797     // Concatenate all vectors into a wide vector.
2798     Value *WideVec = ConcatenateVectors(Builder, StoredVecs);
2799
2800     // Interleave the elements in the wide vector.
2801     Constant *IMask = getInterleavedMask(Builder, VF, InterleaveFactor);
2802     Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
2803                                               "interleaved.vec");
2804
2805     Instruction *NewStoreInstr =
2806         Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
2807     addMetadata(NewStoreInstr, Instr);
2808   }
2809 }
2810
2811 void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
2812   // Attempt to issue a wide load.
2813   LoadInst *LI = dyn_cast<LoadInst>(Instr);
2814   StoreInst *SI = dyn_cast<StoreInst>(Instr);
2815
2816   assert((LI || SI) && "Invalid Load/Store instruction");
2817
2818   // Try to vectorize the interleave group if this access is interleaved.
2819   if (Legal->isAccessInterleaved(Instr))
2820     return vectorizeInterleaveGroup(Instr);
2821
2822   Type *ScalarDataTy = LI ? LI->getType() : SI->getValueOperand()->getType();
2823   Type *DataTy = VectorType::get(ScalarDataTy, VF);
2824   Value *Ptr = getPointerOperand(Instr);
2825   unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment();
2826   // An alignment of 0 means target ABI alignment.
We need to use the scalar's
2827   // target ABI alignment in such a case.
2828   const DataLayout &DL = Instr->getModule()->getDataLayout();
2829   if (!Alignment)
2830     Alignment = DL.getABITypeAlignment(ScalarDataTy);
2831   unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
2832
2833   // Scalarize the memory instruction if necessary.
2834   if (Legal->memoryInstructionMustBeScalarized(Instr, VF))
2835     return scalarizeInstruction(Instr, Legal->isScalarWithPredication(Instr));
2836
2837   // Determine if the pointer operand of the access is either consecutive or
2838   // reverse consecutive.
2839   int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
2840   bool Reverse = ConsecutiveStride < 0;
2841
2842   // Determine if either a gather or scatter operation is legal.
2843   bool CreateGatherScatter =
2844       !ConsecutiveStride && Legal->isLegalGatherOrScatter(Instr);
2845
2846   VectorParts VectorGep;
2847
2848   // Handle consecutive loads/stores.
2849   GetElementPtrInst *Gep = getGEPInstruction(Ptr);
2850   if (ConsecutiveStride) {
2851     if (Gep) {
2852       unsigned NumOperands = Gep->getNumOperands();
2853 #ifndef NDEBUG
2854       // The original GEP that was identified as a consecutive memory access
2855       // should have only one loop-variant operand.
2856       unsigned NumOfLoopVariantOps = 0;
2857       for (unsigned i = 0; i < NumOperands; ++i)
2858         if (!PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)),
2859                                           OrigLoop))
2860           NumOfLoopVariantOps++;
2861       assert(NumOfLoopVariantOps == 1 &&
2862              "Consecutive GEP should have only one loop-variant operand");
2863 #endif
2864       GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
2865       Gep2->setName("gep.indvar");
2866
2867       // A new GEP is created for the 0-lane value of the first unroll iteration.
2868       // The GEPs for the rest of the unroll iterations are computed below as an
2869       // offset from this GEP.
2870       for (unsigned i = 0; i < NumOperands; ++i)
2871         // We can apply getScalarValue() to all GEP indices. It returns the
2872         // original value for a loop-invariant operand and the 0-lane value for
2873         // a consecutive operand.
2874         Gep2->setOperand(i, getScalarValue(Gep->getOperand(i),
2875                                            0, /* First unroll iteration */
2876                                            0 /* 0-lane of the vector */ ));
2877       setDebugLocFromInst(Builder, Gep);
2878       Ptr = Builder.Insert(Gep2);
2879
2880     } else { // No GEP
2881       setDebugLocFromInst(Builder, Ptr);
2882       Ptr = getScalarValue(Ptr, 0, 0);
2883     }
2884   } else {
2885     // At this point we should have a vector version of the GEP for a gather or scatter.
2886     assert(CreateGatherScatter && "The instruction should be scalarized");
2887     if (Gep) {
2888       // Vectorize the GEP across the UF parts. We want a vector value for the base
2889       // and each index that's defined inside the loop, even if it is
2890       // loop-invariant but wasn't hoisted out. Otherwise we want to keep them
2891       // scalar.
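      // (An illustrative sketch: for a gather of A[B[i]], the index B[i] is
      // defined inside the loop, so OpsV below holds its per-part vector
      // values, while loop-invariant operands are replicated per part via
      // VectorParts(UF, Op); mixing scalar and vector operands in CreateGEP
      // then produces a GEP whose result is a vector of pointers.)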
2892 SmallVector<VectorParts, 4> OpsV; 2893 for (Value *Op : Gep->operands()) { 2894 Instruction *SrcInst = dyn_cast<Instruction>(Op); 2895 if (SrcInst && OrigLoop->contains(SrcInst)) 2896 OpsV.push_back(getVectorValue(Op)); 2897 else 2898 OpsV.push_back(VectorParts(UF, Op)); 2899 } 2900 for (unsigned Part = 0; Part < UF; ++Part) { 2901 SmallVector<Value *, 4> Ops; 2902 Value *GEPBasePtr = OpsV[0][Part]; 2903 for (unsigned i = 1; i < Gep->getNumOperands(); i++) 2904 Ops.push_back(OpsV[i][Part]); 2905 Value *NewGep = Builder.CreateGEP(GEPBasePtr, Ops, "VectorGep"); 2906 cast<GetElementPtrInst>(NewGep)->setIsInBounds(Gep->isInBounds()); 2907 assert(NewGep->getType()->isVectorTy() && "Expected vector GEP"); 2908 2909 NewGep = 2910 Builder.CreateBitCast(NewGep, VectorType::get(Ptr->getType(), VF)); 2911 VectorGep.push_back(NewGep); 2912 } 2913 } else 2914 VectorGep = getVectorValue(Ptr); 2915 } 2916 2917 VectorParts Mask = createBlockInMask(Instr->getParent()); 2918 // Handle Stores: 2919 if (SI) { 2920 assert(!Legal->isUniform(SI->getPointerOperand()) && 2921 "We do not allow storing to uniform addresses"); 2922 setDebugLocFromInst(Builder, SI); 2923 // We don't want to update the value in the map as it might be used in 2924 // another expression. So don't use a reference type for "StoredVal". 2925 VectorParts StoredVal = getVectorValue(SI->getValueOperand()); 2926 2927 for (unsigned Part = 0; Part < UF; ++Part) { 2928 Instruction *NewSI = nullptr; 2929 if (CreateGatherScatter) { 2930 Value *MaskPart = Legal->isMaskRequired(SI) ? Mask[Part] : nullptr; 2931 NewSI = Builder.CreateMaskedScatter(StoredVal[Part], VectorGep[Part], 2932 Alignment, MaskPart); 2933 } else { 2934 // Calculate the pointer for the specific unroll-part. 2935 Value *PartPtr = 2936 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 2937 2938 if (Reverse) { 2939 // If we store to reverse consecutive memory locations, then we need 2940 // to reverse the order of elements in the stored value. 2941 StoredVal[Part] = reverseVector(StoredVal[Part]); 2942 // If the address is consecutive but reversed, then the 2943 // wide store needs to start at the last vector element. 2944 PartPtr = 2945 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF)); 2946 PartPtr = 2947 Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF)); 2948 Mask[Part] = reverseVector(Mask[Part]); 2949 } 2950 2951 Value *VecPtr = 2952 Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2953 2954 if (Legal->isMaskRequired(SI)) 2955 NewSI = Builder.CreateMaskedStore(StoredVal[Part], VecPtr, Alignment, 2956 Mask[Part]); 2957 else 2958 NewSI = 2959 Builder.CreateAlignedStore(StoredVal[Part], VecPtr, Alignment); 2960 } 2961 addMetadata(NewSI, SI); 2962 } 2963 return; 2964 } 2965 2966 // Handle loads. 2967 assert(LI && "Must have a load instruction"); 2968 setDebugLocFromInst(Builder, LI); 2969 VectorParts Entry(UF); 2970 for (unsigned Part = 0; Part < UF; ++Part) { 2971 Instruction *NewLI; 2972 if (CreateGatherScatter) { 2973 Value *MaskPart = Legal->isMaskRequired(LI) ? Mask[Part] : nullptr; 2974 NewLI = Builder.CreateMaskedGather(VectorGep[Part], Alignment, MaskPart, 2975 0, "wide.masked.gather"); 2976 Entry[Part] = NewLI; 2977 } else { 2978 // Calculate the pointer for the specific unroll-part. 2979 Value *PartPtr = 2980 Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF)); 2981 2982 if (Reverse) { 2983 // If the address is consecutive but reversed, then the 2984 // wide load needs to start at the last vector element. 
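        // (A worked example with VF = 4: the two GEPs below compute
        // PartPtr = Ptr - Part * VF - (VF - 1), so part 0 loads the lanes
        // Ptr[-3] .. Ptr[0]; the loaded vector is then reversed to restore
        // the original iteration order.)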
2985         PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
2986         PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
2987         Mask[Part] = reverseVector(Mask[Part]);
2988       }
2989
2990       Value *VecPtr =
2991           Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
2992       if (Legal->isMaskRequired(LI))
2993         NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
2994                                          UndefValue::get(DataTy),
2995                                          "wide.masked.load");
2996       else
2997         NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
2998       Entry[Part] = Reverse ? reverseVector(NewLI) : NewLI;
2999     }
3000     addMetadata(NewLI, LI);
3001   }
3002   VectorLoopValueMap.initVector(Instr, Entry);
3003 }
3004
3005 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
3006                                                bool IfPredicateInstr) {
3007   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
3008   DEBUG(dbgs() << "LV: Scalarizing"
3009                << (IfPredicateInstr ? " and predicating:" : ":") << *Instr
3010                << '\n');
3011   // Holds vector parameters or scalars, in the case of uniform values.
3012   SmallVector<VectorParts, 4> Params;
3013
3014   setDebugLocFromInst(Builder, Instr);
3015
3016   // Does this instruction return a value?
3017   bool IsVoidRetTy = Instr->getType()->isVoidTy();
3018
3019   // Initialize a new scalar map entry.
3020   ScalarParts Entry(UF);
3021
3022   VectorParts Cond;
3023   if (IfPredicateInstr)
3024     Cond = createBlockInMask(Instr->getParent());
3025
3026   // Determine the number of scalars we need to generate for each unroll
3027   // iteration. If the instruction is uniform, we only need to generate the
3028   // first lane. Otherwise, we generate all VF values.
3029   unsigned Lanes = Legal->isUniformAfterVectorization(Instr) ? 1 : VF;
3030
3031   // For each vector unroll 'part':
3032   for (unsigned Part = 0; Part < UF; ++Part) {
3033     Entry[Part].resize(VF);
3034     // For each scalar that we create:
3035     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
3036
3037       // Start if-block.
3038       Value *Cmp = nullptr;
3039       if (IfPredicateInstr) {
3040         Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Lane));
3041         Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp,
3042                                  ConstantInt::get(Cmp->getType(), 1));
3043       }
3044
3045       Instruction *Cloned = Instr->clone();
3046       if (!IsVoidRetTy)
3047         Cloned->setName(Instr->getName() + ".cloned");
3048
3049       // Replace the operands of the cloned instruction with their scalar
3050       // equivalents in the new loop.
3051       for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
3052         auto *NewOp = getScalarValue(Instr->getOperand(op), Part, Lane);
3053         Cloned->setOperand(op, NewOp);
3054       }
3055       addNewMetadata(Cloned, Instr);
3056
3057       // Place the cloned scalar in the new loop.
3058       Builder.Insert(Cloned);
3059
3060       // Add the cloned scalar to the scalar map entry.
3061       Entry[Part][Lane] = Cloned;
3062
3063       // If we just cloned a new assumption, add it to the assumption cache.
3064       if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
3065         if (II->getIntrinsicID() == Intrinsic::assume)
3066           AC->registerAssumption(II);
3067
3068       // End if-block.
3069 if (IfPredicateInstr) 3070 PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp)); 3071 } 3072 } 3073 VectorLoopValueMap.initScalar(Instr, Entry); 3074 } 3075 3076 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 3077 Value *End, Value *Step, 3078 Instruction *DL) { 3079 BasicBlock *Header = L->getHeader(); 3080 BasicBlock *Latch = L->getLoopLatch(); 3081 // As we're just creating this loop, it's possible no latch exists 3082 // yet. If so, use the header as this will be a single block loop. 3083 if (!Latch) 3084 Latch = Header; 3085 3086 IRBuilder<> Builder(&*Header->getFirstInsertionPt()); 3087 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 3088 setDebugLocFromInst(Builder, OldInst); 3089 auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index"); 3090 3091 Builder.SetInsertPoint(Latch->getTerminator()); 3092 setDebugLocFromInst(Builder, OldInst); 3093 3094 // Create i+1 and fill the PHINode. 3095 Value *Next = Builder.CreateAdd(Induction, Step, "index.next"); 3096 Induction->addIncoming(Start, L->getLoopPreheader()); 3097 Induction->addIncoming(Next, Latch); 3098 // Create the compare. 3099 Value *ICmp = Builder.CreateICmpEQ(Next, End); 3100 Builder.CreateCondBr(ICmp, L->getExitBlock(), Header); 3101 3102 // Now we have two terminators. Remove the old one from the block. 3103 Latch->getTerminator()->eraseFromParent(); 3104 3105 return Induction; 3106 } 3107 3108 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 3109 if (TripCount) 3110 return TripCount; 3111 3112 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3113 // Find the loop boundaries. 3114 ScalarEvolution *SE = PSE.getSE(); 3115 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 3116 assert(BackedgeTakenCount != SE->getCouldNotCompute() && 3117 "Invalid loop count"); 3118 3119 Type *IdxTy = Legal->getWidestInductionType(); 3120 3121 // The exit count might have the type of i64 while the phi is i32. This can 3122 // happen if we have an induction variable that is sign extended before the 3123 // compare. The only way that we get a backedge taken count is that the 3124 // induction variable was signed and as such will not overflow. In such a case 3125 // truncation is legal. 3126 if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() > 3127 IdxTy->getPrimitiveSizeInBits()) 3128 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3129 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3130 3131 // Get the total trip count from the count by adding 1. 3132 const SCEV *ExitCount = SE->getAddExpr( 3133 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3134 3135 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3136 3137 // Expand the trip count and place the new instructions in the preheader. 3138 // Notice that the pre-header does not change, only the loop body. 3139 SCEVExpander Exp(*SE, DL, "induction"); 3140 3141 // Count holds the overall loop count (N). 
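  // (Illustrative: for a loop `for (i = 0; i < n; ++i)`, the backedge-taken
  // count is n - 1, so the expression expanded below evaluates to n.)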
3142 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3143 L->getLoopPreheader()->getTerminator()); 3144 3145 if (TripCount->getType()->isPointerTy()) 3146 TripCount = 3147 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3148 L->getLoopPreheader()->getTerminator()); 3149 3150 return TripCount; 3151 } 3152 3153 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3154 if (VectorTripCount) 3155 return VectorTripCount; 3156 3157 Value *TC = getOrCreateTripCount(L); 3158 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3159 3160 // Now we need to generate the expression for the part of the loop that the 3161 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3162 // iterations are not required for correctness, or N - Step, otherwise. Step 3163 // is equal to the vectorization factor (number of SIMD elements) times the 3164 // unroll factor (number of SIMD instructions). 3165 Constant *Step = ConstantInt::get(TC->getType(), VF * UF); 3166 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3167 3168 // If there is a non-reversed interleaved group that may speculatively access 3169 // memory out-of-bounds, we need to ensure that there will be at least one 3170 // iteration of the scalar epilogue loop. Thus, if the step evenly divides 3171 // the trip count, we set the remainder to be equal to the step. If the step 3172 // does not evenly divide the trip count, no adjustment is necessary since 3173 // there will already be scalar iterations. Note that the minimum iterations 3174 // check ensures that N >= Step. 3175 if (VF > 1 && Legal->requiresScalarEpilogue()) { 3176 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3177 R = Builder.CreateSelect(IsZero, Step, R); 3178 } 3179 3180 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3181 3182 return VectorTripCount; 3183 } 3184 3185 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3186 BasicBlock *Bypass) { 3187 Value *Count = getOrCreateTripCount(L); 3188 BasicBlock *BB = L->getLoopPreheader(); 3189 IRBuilder<> Builder(BB->getTerminator()); 3190 3191 // Generate code to check that the loop's trip count that we computed by 3192 // adding one to the backedge-taken count will not overflow. 3193 Value *CheckMinIters = Builder.CreateICmpULT( 3194 Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check"); 3195 3196 BasicBlock *NewBB = 3197 BB->splitBasicBlock(BB->getTerminator(), "min.iters.checked"); 3198 // Update dominator tree immediately if the generated block is a 3199 // LoopBypassBlock because SCEV expansions to generate loop bypass 3200 // checks may query it before the current function is finished. 3201 DT->addNewBlock(NewBB, BB); 3202 if (L->getParentLoop()) 3203 L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI); 3204 ReplaceInstWithInst(BB->getTerminator(), 3205 BranchInst::Create(Bypass, NewBB, CheckMinIters)); 3206 LoopBypassBlocks.push_back(BB); 3207 } 3208 3209 void InnerLoopVectorizer::emitVectorLoopEnteredCheck(Loop *L, 3210 BasicBlock *Bypass) { 3211 Value *TC = getOrCreateVectorTripCount(L); 3212 BasicBlock *BB = L->getLoopPreheader(); 3213 IRBuilder<> Builder(BB->getTerminator()); 3214 3215 // Now, compare the new count to zero. If it is zero skip the vector loop and 3216 // jump to the scalar loop. 
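  // (A worked example of when this fires: with N = 8, VF = 4 and UF = 2, Step
  // is 8; if a scalar epilogue is required, getOrCreateVectorTripCount raised
  // n.mod.vf from 0 to 8, giving n.vec = 0, so the branch below goes straight
  // to the scalar loop.)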
3217   Value *Cmp = Builder.CreateICmpEQ(TC, Constant::getNullValue(TC->getType()),
3218                                     "cmp.zero");
3219
3220   // Split the preheader: the original block will end with the bypass branch
3221   // created below, and the new block becomes the vector loop preheader.
3222   BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3223   // Update dominator tree immediately if the generated block is a
3224   // LoopBypassBlock because SCEV expansions to generate loop bypass
3225   // checks may query it before the current function is finished.
3226   DT->addNewBlock(NewBB, BB);
3227   if (L->getParentLoop())
3228     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3229   ReplaceInstWithInst(BB->getTerminator(),
3230                       BranchInst::Create(Bypass, NewBB, Cmp));
3231   LoopBypassBlocks.push_back(BB);
3232 }
3233
3234 void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3235   BasicBlock *BB = L->getLoopPreheader();
3236
3237   // Generate the code to check the SCEV assumptions that we made.
3238   // We want the new basic block to start at the first instruction in a
3239   // sequence of instructions that form a check.
3240   SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
3241                    "scev.check");
3242   Value *SCEVCheck =
3243       Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
3244
3245   if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
3246     if (C->isZero())
3247       return;
3248
3249   // Create a new block containing the SCEV check.
3250   BB->setName("vector.scevcheck");
3251   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3252   // Update dominator tree immediately if the generated block is a
3253   // LoopBypassBlock because SCEV expansions to generate loop bypass
3254   // checks may query it before the current function is finished.
3255   DT->addNewBlock(NewBB, BB);
3256   if (L->getParentLoop())
3257     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3258   ReplaceInstWithInst(BB->getTerminator(),
3259                       BranchInst::Create(Bypass, NewBB, SCEVCheck));
3260   LoopBypassBlocks.push_back(BB);
3261   AddedSafetyChecks = true;
3262 }
3263
3264 void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
3265   BasicBlock *BB = L->getLoopPreheader();
3266
3267   // Generate the code that checks at runtime whether arrays overlap. We put
3268   // the checks into a separate block to make the common case of few elements
3269   // faster.
3270   Instruction *FirstCheckInst;
3271   Instruction *MemRuntimeCheck;
3272   std::tie(FirstCheckInst, MemRuntimeCheck) =
3273       Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
3274   if (!MemRuntimeCheck)
3275     return;
3276
3277   // Create a new block containing the memory check.
3278   BB->setName("vector.memcheck");
3279   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
3280   // Update dominator tree immediately if the generated block is a
3281   // LoopBypassBlock because SCEV expansions to generate loop bypass
3282   // checks may query it before the current function is finished.
3283   DT->addNewBlock(NewBB, BB);
3284   if (L->getParentLoop())
3285     L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
3286   ReplaceInstWithInst(BB->getTerminator(),
3287                       BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
3288   LoopBypassBlocks.push_back(BB);
3289   AddedSafetyChecks = true;
3290
3291   // We currently don't use LoopVersioning for the actual loop cloning but we
3292   // still use it to add the noalias metadata.
3293 LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT, 3294 PSE.getSE()); 3295 LVer->prepareNoAliasMetadata(); 3296 } 3297 3298 void InnerLoopVectorizer::createEmptyLoop() { 3299 /* 3300 In this function we generate a new loop. The new loop will contain 3301 the vectorized instructions while the old loop will continue to run the 3302 scalar remainder. 3303 3304 [ ] <-- loop iteration number check. 3305 / | 3306 / v 3307 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3308 | / | 3309 | / v 3310 || [ ] <-- vector pre header. 3311 |/ | 3312 | v 3313 | [ ] \ 3314 | [ ]_| <-- vector loop. 3315 | | 3316 | v 3317 | -[ ] <--- middle-block. 3318 | / | 3319 | / v 3320 -|- >[ ] <--- new preheader. 3321 | | 3322 | v 3323 | [ ] \ 3324 | [ ]_| <-- old scalar loop to handle remainder. 3325 \ | 3326 \ v 3327 >[ ] <-- exit block. 3328 ... 3329 */ 3330 3331 BasicBlock *OldBasicBlock = OrigLoop->getHeader(); 3332 BasicBlock *VectorPH = OrigLoop->getLoopPreheader(); 3333 BasicBlock *ExitBlock = OrigLoop->getExitBlock(); 3334 assert(VectorPH && "Invalid loop structure"); 3335 assert(ExitBlock && "Must have an exit block"); 3336 3337 // Some loops have a single integer induction variable, while other loops 3338 // don't. One example is c++ iterators that often have multiple pointer 3339 // induction variables. In the code below we also support a case where we 3340 // don't have a single induction variable. 3341 // 3342 // We try to obtain an induction variable from the original loop as hard 3343 // as possible. However if we don't find one that: 3344 // - is an integer 3345 // - counts from zero, stepping by one 3346 // - is the size of the widest induction variable type 3347 // then we create a new one. 3348 OldInduction = Legal->getInduction(); 3349 Type *IdxTy = Legal->getWidestInductionType(); 3350 3351 // Split the single block loop into the two loop structure described above. 3352 BasicBlock *VecBody = 3353 VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body"); 3354 BasicBlock *MiddleBlock = 3355 VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block"); 3356 BasicBlock *ScalarPH = 3357 MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph"); 3358 3359 // Create and register the new vector loop. 3360 Loop *Lp = new Loop(); 3361 Loop *ParentLoop = OrigLoop->getParentLoop(); 3362 3363 // Insert the new loop into the loop nest and register the new basic blocks 3364 // before calling any utilities such as SCEV that require valid LoopInfo. 3365 if (ParentLoop) { 3366 ParentLoop->addChildLoop(Lp); 3367 ParentLoop->addBasicBlockToLoop(ScalarPH, *LI); 3368 ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI); 3369 } else { 3370 LI->addTopLevelLoop(Lp); 3371 } 3372 Lp->addBasicBlockToLoop(VecBody, *LI); 3373 3374 // Find the loop boundaries. 3375 Value *Count = getOrCreateTripCount(Lp); 3376 3377 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3378 3379 // We need to test whether the backedge-taken count is uint##_max. Adding one 3380 // to it will cause overflow and an incorrect loop trip count in the vector 3381 // body. In case of overflow we want to directly jump to the scalar remainder 3382 // loop. 3383 emitMinimumIterationCountCheck(Lp, ScalarPH); 3384 // Now, compare the new count to zero. If it is zero skip the vector loop and 3385 // jump to the scalar loop. 3386 emitVectorLoopEnteredCheck(Lp, ScalarPH); 3387 // Generate the code to check any assumptions that we've made for SCEV 3388 // expressions. 
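  // (Typical assumptions, for illustration: a symbolic stride that versioning
  // assumes to be one, or an induction variable assumed not to wrap; if the
  // expanded check folds to a constant zero, emitSCEVChecks emits nothing.)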
3389 emitSCEVChecks(Lp, ScalarPH); 3390 3391 // Generate the code that checks in runtime if arrays overlap. We put the 3392 // checks into a separate block to make the more common case of few elements 3393 // faster. 3394 emitMemRuntimeChecks(Lp, ScalarPH); 3395 3396 // Generate the induction variable. 3397 // The loop step is equal to the vectorization factor (num of SIMD elements) 3398 // times the unroll factor (num of SIMD instructions). 3399 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3400 Constant *Step = ConstantInt::get(IdxTy, VF * UF); 3401 Induction = 3402 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3403 getDebugLocFromInstOrOperands(OldInduction)); 3404 3405 // We are going to resume the execution of the scalar loop. 3406 // Go over all of the induction variables that we found and fix the 3407 // PHIs that are left in the scalar version of the loop. 3408 // The starting values of PHI nodes depend on the counter of the last 3409 // iteration in the vectorized loop. 3410 // If we come from a bypass edge then we need to start from the original 3411 // start value. 3412 3413 // This variable saves the new starting index for the scalar loop. It is used 3414 // to test if there are any tail iterations left once the vector loop has 3415 // completed. 3416 LoopVectorizationLegality::InductionList *List = Legal->getInductionVars(); 3417 for (auto &InductionEntry : *List) { 3418 PHINode *OrigPhi = InductionEntry.first; 3419 InductionDescriptor II = InductionEntry.second; 3420 3421 // Create phi nodes to merge from the backedge-taken check block. 3422 PHINode *BCResumeVal = PHINode::Create( 3423 OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator()); 3424 Value *&EndValue = IVEndValues[OrigPhi]; 3425 if (OrigPhi == OldInduction) { 3426 // We know what the end value is. 3427 EndValue = CountRoundDown; 3428 } else { 3429 IRBuilder<> B(LoopBypassBlocks.back()->getTerminator()); 3430 Type *StepType = II.getStep()->getType(); 3431 Instruction::CastOps CastOp = 3432 CastInst::getCastOpcode(CountRoundDown, true, StepType, true); 3433 Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd"); 3434 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 3435 EndValue = II.transform(B, CRD, PSE.getSE(), DL); 3436 EndValue->setName("ind.end"); 3437 } 3438 3439 // The new PHI merges the original incoming value, in case of a bypass, 3440 // or the value at the end of the vectorized loop. 3441 BCResumeVal->addIncoming(EndValue, MiddleBlock); 3442 3443 // Fix the scalar body counter (PHI node). 3444 unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH); 3445 3446 // The old induction's phi node in the scalar body needs the truncated 3447 // value. 3448 for (BasicBlock *BB : LoopBypassBlocks) 3449 BCResumeVal->addIncoming(II.getStartValue(), BB); 3450 OrigPhi->setIncomingValue(BlockIdx, BCResumeVal); 3451 } 3452 3453 // Add a check in the middle block to see if we have completed 3454 // all of the iterations in the first vector loop. 3455 // If (N - N%VF) == N, then we *don't* need to run the remainder. 3456 Value *CmpN = 3457 CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count, 3458 CountRoundDown, "cmp.n", MiddleBlock->getTerminator()); 3459 ReplaceInstWithInst(MiddleBlock->getTerminator(), 3460 BranchInst::Create(ExitBlock, ScalarPH, CmpN)); 3461 3462 // Get ready to start creating new instructions into the vectorized body. 3463 Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt()); 3464 3465 // Save the state. 
3466   LoopVectorPreHeader = Lp->getLoopPreheader();
3467   LoopScalarPreHeader = ScalarPH;
3468   LoopMiddleBlock = MiddleBlock;
3469   LoopExitBlock = ExitBlock;
3470   LoopVectorBody = VecBody;
3471   LoopScalarBody = OldBasicBlock;
3472
3473   // Keep all loop hints from the original loop on the vector loop (we'll
3474   // replace the vectorizer-specific hints below).
3475   if (MDNode *LID = OrigLoop->getLoopID())
3476     Lp->setLoopID(LID);
3477
3478   LoopVectorizeHints Hints(Lp, true, *ORE);
3479   Hints.setAlreadyVectorized();
3480 }
3481
3482 // Fix up external users of the induction variable. At this point, we are
3483 // in LCSSA form, with all external PHIs that use the IV having one input value,
3484 // coming from the remainder loop. We need those PHIs to also have a correct
3485 // value for the IV when arriving directly from the middle block.
3486 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3487                                        const InductionDescriptor &II,
3488                                        Value *CountRoundDown, Value *EndValue,
3489                                        BasicBlock *MiddleBlock) {
3490   // There are two kinds of external IV uses: those that use the value
3491   // computed in the last iteration (the PHI) and those that use the penultimate
3492   // value (the value that feeds into the phi from the loop latch).
3493   // We allow both, but they obviously have different values.
3494
3495   assert(OrigLoop->getExitBlock() && "Expected a single exit block");
3496
3497   DenseMap<Value *, Value *> MissingVals;
3498
3499   // An external user of the last iteration's value should see the value that
3500   // the remainder loop uses to initialize its own IV.
3501   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3502   for (User *U : PostInc->users()) {
3503     Instruction *UI = cast<Instruction>(U);
3504     if (!OrigLoop->contains(UI)) {
3505       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3506       MissingVals[UI] = EndValue;
3507     }
3508   }
3509
3510   // An external user of the penultimate value needs to see EndValue - Step.
3511   // The simplest way to get this is to recompute it from the constituent SCEVs,
3512   // that is Start + (Step * (CRD - 1)).
3513   for (User *U : OrigPhi->users()) {
3514     auto *UI = cast<Instruction>(U);
3515     if (!OrigLoop->contains(UI)) {
3516       const DataLayout &DL =
3517           OrigLoop->getHeader()->getModule()->getDataLayout();
3518       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3519
3520       IRBuilder<> B(MiddleBlock->getTerminator());
3521       Value *CountMinusOne = B.CreateSub(
3522           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3523       Value *CMO = B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType(),
3524                                        "cast.cmo");
3525       Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
3526       Escape->setName("ind.escape");
3527       MissingVals[UI] = Escape;
3528     }
3529   }
3530
3531   for (auto &I : MissingVals) {
3532     PHINode *PHI = cast<PHINode>(I.first);
3533     // One corner case we have to handle is two IVs "chasing" each other,
3534     // that is %IV2 = phi [...], [ %IV1, %latch ]
3535     // In this case, if IV1 has an external use, we need to avoid adding both
3536     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3537     // don't already have an incoming value for the middle block.
3538 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3539 PHI->addIncoming(I.second, MiddleBlock); 3540 } 3541 } 3542 3543 namespace { 3544 struct CSEDenseMapInfo { 3545 static bool canHandle(Instruction *I) { 3546 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3547 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3548 } 3549 static inline Instruction *getEmptyKey() { 3550 return DenseMapInfo<Instruction *>::getEmptyKey(); 3551 } 3552 static inline Instruction *getTombstoneKey() { 3553 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3554 } 3555 static unsigned getHashValue(Instruction *I) { 3556 assert(canHandle(I) && "Unknown instruction!"); 3557 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3558 I->value_op_end())); 3559 } 3560 static bool isEqual(Instruction *LHS, Instruction *RHS) { 3561 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3562 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3563 return LHS == RHS; 3564 return LHS->isIdenticalTo(RHS); 3565 } 3566 }; 3567 } 3568 3569 ///\brief Perform cse of induction variable instructions. 3570 static void cse(BasicBlock *BB) { 3571 // Perform simple cse. 3572 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3573 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3574 Instruction *In = &*I++; 3575 3576 if (!CSEDenseMapInfo::canHandle(In)) 3577 continue; 3578 3579 // Check if we can replace this instruction with any of the 3580 // visited instructions. 3581 if (Instruction *V = CSEMap.lookup(In)) { 3582 In->replaceAllUsesWith(V); 3583 In->eraseFromParent(); 3584 continue; 3585 } 3586 3587 CSEMap[In] = In; 3588 } 3589 } 3590 3591 /// \brief Adds a 'fast' flag to floating point operations. 3592 static Value *addFastMathFlag(Value *V) { 3593 if (isa<FPMathOperator>(V)) { 3594 FastMathFlags Flags; 3595 Flags.setUnsafeAlgebra(); 3596 cast<Instruction>(V)->setFastMathFlags(Flags); 3597 } 3598 return V; 3599 } 3600 3601 /// \brief Estimate the overhead of scalarizing an Instruction based on the 3602 /// types of its operands and return value. 3603 static unsigned getScalarizationOverhead(SmallVectorImpl<Type *> &OpTys, 3604 Type *RetTy, 3605 const TargetTransformInfo &TTI) { 3606 unsigned ScalarizationCost = 0; 3607 3608 if (!RetTy->isVoidTy()) 3609 ScalarizationCost += TTI.getScalarizationOverhead(RetTy, true, false); 3610 3611 for (Type *Ty : OpTys) 3612 ScalarizationCost += TTI.getScalarizationOverhead(Ty, false, true); 3613 3614 return ScalarizationCost; 3615 } 3616 3617 /// \brief Estimate the overhead of scalarizing an instruction. This is a 3618 /// convenience wrapper for the type-based getScalarizationOverhead API. 3619 static unsigned getScalarizationOverhead(Instruction *I, unsigned VF, 3620 const TargetTransformInfo &TTI) { 3621 if (VF == 1) 3622 return 0; 3623 3624 unsigned Cost = 0; 3625 Type *RetTy = ToVectorTy(I->getType(), VF); 3626 if (!RetTy->isVoidTy()) 3627 Cost += TTI.getScalarizationOverhead(RetTy, true, false); 3628 3629 SmallVector<const Value *, 4> Operands(I->operand_values()); 3630 Cost += TTI.getOperandsScalarizationOverhead(Operands, VF); 3631 3632 return Cost; 3633 } 3634 3635 // Estimate cost of a call instruction CI if it were vectorized with factor VF. 3636 // Return the cost of the instruction, including scalarization overhead if it's 3637 // needed. The flag NeedToScalarize shows if the call needs to be scalarized - 3638 // i.e. either vector version isn't available, or is too expensive. 
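// For example (an illustrative costing, not measured numbers): with VF = 4, a
// scalar call cost of 10 and a scalarization overhead of 6, the scalarized
// cost is 4 * 10 + 6 = 46; if TLI also knows a vector variant costing 20, the
// vector call wins and NeedToScalarize is set to false.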
3639 static unsigned getVectorCallCost(CallInst *CI, unsigned VF, 3640 const TargetTransformInfo &TTI, 3641 const TargetLibraryInfo *TLI, 3642 bool &NeedToScalarize) { 3643 Function *F = CI->getCalledFunction(); 3644 StringRef FnName = CI->getCalledFunction()->getName(); 3645 Type *ScalarRetTy = CI->getType(); 3646 SmallVector<Type *, 4> Tys, ScalarTys; 3647 for (auto &ArgOp : CI->arg_operands()) 3648 ScalarTys.push_back(ArgOp->getType()); 3649 3650 // Estimate cost of scalarized vector call. The source operands are assumed 3651 // to be vectors, so we need to extract individual elements from there, 3652 // execute VF scalar calls, and then gather the result into the vector return 3653 // value. 3654 unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys); 3655 if (VF == 1) 3656 return ScalarCallCost; 3657 3658 // Compute corresponding vector type for return value and arguments. 3659 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3660 for (Type *ScalarTy : ScalarTys) 3661 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3662 3663 // Compute costs of unpacking argument values for the scalar calls and 3664 // packing the return values to a vector. 3665 unsigned ScalarizationCost = getScalarizationOverhead(Tys, RetTy, TTI); 3666 3667 unsigned Cost = ScalarCallCost * VF + ScalarizationCost; 3668 3669 // If we can't emit a vector call for this function, then the currently found 3670 // cost is the cost we need to return. 3671 NeedToScalarize = true; 3672 if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin()) 3673 return Cost; 3674 3675 // If the corresponding vector cost is cheaper, return its cost. 3676 unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys); 3677 if (VectorCallCost < Cost) { 3678 NeedToScalarize = false; 3679 return VectorCallCost; 3680 } 3681 return Cost; 3682 } 3683 3684 // Estimate cost of an intrinsic call instruction CI if it were vectorized with 3685 // factor VF. Return the cost of the instruction, including scalarization 3686 // overhead if it's needed. 3687 static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF, 3688 const TargetTransformInfo &TTI, 3689 const TargetLibraryInfo *TLI) { 3690 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3691 assert(ID && "Expected intrinsic call!"); 3692 3693 Type *RetTy = ToVectorTy(CI->getType(), VF); 3694 SmallVector<Type *, 4> Tys; 3695 for (Value *ArgOperand : CI->arg_operands()) 3696 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF)); 3697 3698 FastMathFlags FMF; 3699 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3700 FMF = FPMO->getFastMathFlags(); 3701 3702 return TTI.getIntrinsicInstrCost(ID, RetTy, Tys, FMF); 3703 } 3704 3705 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3706 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3707 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3708 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3709 } 3710 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3711 auto *I1 = cast<IntegerType>(T1->getVectorElementType()); 3712 auto *I2 = cast<IntegerType>(T2->getVectorElementType()); 3713 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3714 } 3715 3716 void InnerLoopVectorizer::truncateToMinimalBitwidths() { 3717 // For every instruction `I` in MinBWs, truncate the operands, create a 3718 // truncated version of `I` and reextend its result. InstCombine runs 3719 // later and will remove any ext/trunc pairs. 
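  // For example (illustrative): if MinBWs records that an i32 add is only ever
  // used as an i8, the code below shrinks its operands, re-creates the add on
  // <VF x i8> values, and zero-extends the result back to <VF x i32> for the
  // remaining users.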
3720 // 3721 SmallPtrSet<Value *, 4> Erased; 3722 for (const auto &KV : Cost->getMinimalBitwidths()) { 3723 // If the value wasn't vectorized, we must maintain the original scalar 3724 // type. The absence of the value from VectorLoopValueMap indicates that it 3725 // wasn't vectorized. 3726 if (!VectorLoopValueMap.hasVector(KV.first)) 3727 continue; 3728 VectorParts &Parts = VectorLoopValueMap.getVector(KV.first); 3729 for (Value *&I : Parts) { 3730 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3731 continue; 3732 Type *OriginalTy = I->getType(); 3733 Type *ScalarTruncatedTy = 3734 IntegerType::get(OriginalTy->getContext(), KV.second); 3735 Type *TruncatedTy = VectorType::get(ScalarTruncatedTy, 3736 OriginalTy->getVectorNumElements()); 3737 if (TruncatedTy == OriginalTy) 3738 continue; 3739 3740 IRBuilder<> B(cast<Instruction>(I)); 3741 auto ShrinkOperand = [&](Value *V) -> Value * { 3742 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3743 if (ZI->getSrcTy() == TruncatedTy) 3744 return ZI->getOperand(0); 3745 return B.CreateZExtOrTrunc(V, TruncatedTy); 3746 }; 3747 3748 // The actual instruction modification depends on the instruction type, 3749 // unfortunately. 3750 Value *NewI = nullptr; 3751 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3752 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3753 ShrinkOperand(BO->getOperand(1))); 3754 cast<BinaryOperator>(NewI)->copyIRFlags(I); 3755 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3756 NewI = 3757 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3758 ShrinkOperand(CI->getOperand(1))); 3759 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3760 NewI = B.CreateSelect(SI->getCondition(), 3761 ShrinkOperand(SI->getTrueValue()), 3762 ShrinkOperand(SI->getFalseValue())); 3763 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3764 switch (CI->getOpcode()) { 3765 default: 3766 llvm_unreachable("Unhandled cast!"); 3767 case Instruction::Trunc: 3768 NewI = ShrinkOperand(CI->getOperand(0)); 3769 break; 3770 case Instruction::SExt: 3771 NewI = B.CreateSExtOrTrunc( 3772 CI->getOperand(0), 3773 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3774 break; 3775 case Instruction::ZExt: 3776 NewI = B.CreateZExtOrTrunc( 3777 CI->getOperand(0), 3778 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3779 break; 3780 } 3781 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3782 auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements(); 3783 auto *O0 = B.CreateZExtOrTrunc( 3784 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3785 auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements(); 3786 auto *O1 = B.CreateZExtOrTrunc( 3787 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3788 3789 NewI = B.CreateShuffleVector(O0, O1, SI->getMask()); 3790 } else if (isa<LoadInst>(I)) { 3791 // Don't do anything with the operands, just extend the result. 
3792       continue;
3793     } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
3794       auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
3795       auto *O0 = B.CreateZExtOrTrunc(
3796           IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3797       auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
3798       NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
3799     } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
3800       auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
3801       auto *O0 = B.CreateZExtOrTrunc(
3802           EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
3803       NewI = B.CreateExtractElement(O0, EE->getOperand(2));
3804     } else {
3805       llvm_unreachable("Unhandled instruction type!");
3806     }
3807
3808     // Lastly, extend the result.
3809     NewI->takeName(cast<Instruction>(I));
3810     Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
3811     I->replaceAllUsesWith(Res);
3812     cast<Instruction>(I)->eraseFromParent();
3813     Erased.insert(I);
3814     I = Res;
3815   }
3816 }
3817
3818   // We'll have created a bunch of ZExts that are now dead. Clean them up.
3819   for (const auto &KV : Cost->getMinimalBitwidths()) {
3820     // If the value wasn't vectorized, we must maintain the original scalar
3821     // type. The absence of the value from VectorLoopValueMap indicates that it
3822     // wasn't vectorized.
3823     if (!VectorLoopValueMap.hasVector(KV.first))
3824       continue;
3825     VectorParts &Parts = VectorLoopValueMap.getVector(KV.first);
3826     for (Value *&I : Parts) {
3827       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
3828       if (Inst && Inst->use_empty()) {
3829         Value *NewI = Inst->getOperand(0);
3830         Inst->eraseFromParent();
3831         I = NewI;
3832       }
3833     }
3834   }
3835 }
3836
3837 void InnerLoopVectorizer::vectorizeLoop() {
3838   //===------------------------------------------------===//
3839   //
3840   // Notice: any optimization or new instruction that goes
3841   // into the code below should also be implemented in
3842   // the cost model.
3843   //
3844   //===------------------------------------------------===//
3845   Constant *Zero = Builder.getInt32(0);
3846
3847   // In order to support recurrences we need to be able to vectorize Phi nodes.
3848   // Phi nodes have cycles, so we need to vectorize them in two stages. First,
3849   // we create a new vector PHI node with no incoming edges. We use this value
3850   // when we vectorize all of the instructions that use the PHI. Next, after
3851   // all of the instructions in the block are complete we add the new incoming
3852   // edges to the PHI. At this point all of the instructions in the basic block
3853   // are vectorized, so we can use them to construct the PHI.
3854   PhiVector PHIsToFix;
3855
3856   // Collect instructions from the original loop that will become trivially
3857   // dead in the vectorized loop. We don't need to vectorize these
3858   // instructions.
3859   collectTriviallyDeadInstructions();
3860
3861   // Scan the loop in a topological order to ensure that defs are vectorized
3862   // before users.
3863   LoopBlocksDFS DFS(OrigLoop);
3864   DFS.perform(LI);
3865
3866   // Vectorize all of the blocks in the original loop.
3867   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
3868     vectorizeBlockInLoop(BB, &PHIsToFix);
3869
3870   // Insert truncates and extends for any truncated instructions as hints to
3871   // InstCombine.
3872   if (VF > 1)
3873     truncateToMinimalBitwidths();
3874
3875   // At this point every instruction in the original loop is widened to a
3876   // vector form.
Now we need to fix the recurrences in PHIsToFix. These PHI
3877   // nodes are currently empty because we did not want to introduce cycles.
3878   // This is the second stage of vectorizing recurrences.
3879   for (PHINode *Phi : PHIsToFix) {
3880     assert(Phi && "Unable to recover vectorized PHI");
3881
3882     // Handle first-order recurrences that need to be fixed.
3883     if (Legal->isFirstOrderRecurrence(Phi)) {
3884       fixFirstOrderRecurrence(Phi);
3885       continue;
3886     }
3887
3888     // If the phi node is not a first-order recurrence, it must be a reduction.
3889     // Get its reduction variable descriptor.
3890     assert(Legal->isReductionVariable(Phi) &&
3891            "Unable to find the reduction variable");
3892     RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
3893
3894     RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
3895     TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
3896     Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
3897     RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
3898         RdxDesc.getMinMaxRecurrenceKind();
3899     setDebugLocFromInst(Builder, ReductionStartValue);
3900
3901     // We need to generate a reduction vector from the incoming scalar.
3902     // To do so, we need to generate the 'identity' vector and override
3903     // one of the elements with the incoming scalar reduction. We need
3904     // to do it in the vector-loop preheader.
3905     Builder.SetInsertPoint(LoopBypassBlocks[1]->getTerminator());
3906
3907     // This is the vector-clone of the value that leaves the loop.
3908     const VectorParts &VectorExit = getVectorValue(LoopExitInst);
3909     Type *VecTy = VectorExit[0]->getType();
3910
3911     // Find the reduction identity variable. Zero for addition, or, and xor;
3912     // one for multiplication; -1 for and.
3913     Value *Identity;
3914     Value *VectorStart;
3915     if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
3916         RK == RecurrenceDescriptor::RK_FloatMinMax) {
3917       // MinMax reductions have the start value as their identity.
3918       if (VF == 1) {
3919         VectorStart = Identity = ReductionStartValue;
3920       } else {
3921         VectorStart = Identity =
3922             Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
3923       }
3924     } else {
3925       // Handle other reduction kinds:
3926       Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
3927           RK, VecTy->getScalarType());
3928       if (VF == 1) {
3929         Identity = Iden;
3930         // This vector is the Identity vector where the first element is the
3931         // incoming scalar reduction.
3932         VectorStart = ReductionStartValue;
3933       } else {
3934         Identity = ConstantVector::getSplat(VF, Iden);
3935
3936         // This vector is the Identity vector where the first element is the
3937         // incoming scalar reduction.
3938         VectorStart =
3939             Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
3940       }
3941     }
3942
3943     // Fix the vector-loop phi.
3944
3945     // Reductions do not have to start at zero. They can start with
3946     // any loop-invariant value.
3947     const VectorParts &VecRdxPhi = getVectorValue(Phi);
3948     BasicBlock *Latch = OrigLoop->getLoopLatch();
3949     Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
3950     const VectorParts &Val = getVectorValue(LoopVal);
3951     for (unsigned part = 0; part < UF; ++part) {
3952       // Make sure to add the reduction start value only to the
3953       // first unroll part.
3954       Value *StartVal = (part == 0) ?
VectorStart : Identity; 3955 cast<PHINode>(VecRdxPhi[part]) 3956 ->addIncoming(StartVal, LoopVectorPreHeader); 3957 cast<PHINode>(VecRdxPhi[part]) 3958 ->addIncoming(Val[part], LoopVectorBody); 3959 } 3960 3961 // Before each round, move the insertion point right between 3962 // the PHIs and the values we are going to write. 3963 // This allows us to write both PHINodes and the extractelement 3964 // instructions. 3965 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3966 3967 VectorParts &RdxParts = VectorLoopValueMap.getVector(LoopExitInst); 3968 setDebugLocFromInst(Builder, LoopExitInst); 3969 3970 // If the vector reduction can be performed in a smaller type, we truncate 3971 // then extend the loop exit value to enable InstCombine to evaluate the 3972 // entire expression in the smaller type. 3973 if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) { 3974 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 3975 Builder.SetInsertPoint(LoopVectorBody->getTerminator()); 3976 for (unsigned part = 0; part < UF; ++part) { 3977 Value *Trunc = Builder.CreateTrunc(RdxParts[part], RdxVecTy); 3978 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3979 : Builder.CreateZExt(Trunc, VecTy); 3980 for (Value::user_iterator UI = RdxParts[part]->user_begin(); 3981 UI != RdxParts[part]->user_end();) 3982 if (*UI != Trunc) { 3983 (*UI++)->replaceUsesOfWith(RdxParts[part], Extnd); 3984 RdxParts[part] = Extnd; 3985 } else { 3986 ++UI; 3987 } 3988 } 3989 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3990 for (unsigned part = 0; part < UF; ++part) 3991 RdxParts[part] = Builder.CreateTrunc(RdxParts[part], RdxVecTy); 3992 } 3993 3994 // Reduce all of the unrolled parts into a single vector. 3995 Value *ReducedPartRdx = RdxParts[0]; 3996 unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK); 3997 setDebugLocFromInst(Builder, ReducedPartRdx); 3998 for (unsigned part = 1; part < UF; ++part) { 3999 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 4000 // Floating point operations had to be 'fast' to enable the reduction. 4001 ReducedPartRdx = addFastMathFlag( 4002 Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part], 4003 ReducedPartRdx, "bin.rdx")); 4004 else 4005 ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp( 4006 Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]); 4007 } 4008 4009 if (VF > 1) { 4010 // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles 4011 // and vector ops, reducing the set of values being computed by half each 4012 // round. 4013 assert(isPowerOf2_32(VF) && 4014 "Reduction emission only supported for pow2 vectors!"); 4015 Value *TmpVec = ReducedPartRdx; 4016 SmallVector<Constant *, 32> ShuffleMask(VF, nullptr); 4017 for (unsigned i = VF; i != 1; i >>= 1) { 4018 // Move the upper half of the vector to the lower half. 4019 for (unsigned j = 0; j != i / 2; ++j) 4020 ShuffleMask[j] = Builder.getInt32(i / 2 + j); 4021 4022 // Fill the rest of the mask with undef. 4023 std::fill(&ShuffleMask[i / 2], ShuffleMask.end(), 4024 UndefValue::get(Builder.getInt32Ty())); 4025 4026 Value *Shuf = Builder.CreateShuffleVector( 4027 TmpVec, UndefValue::get(TmpVec->getType()), 4028 ConstantVector::get(ShuffleMask), "rdx.shuf"); 4029 4030 if (Op != Instruction::ICmp && Op != Instruction::FCmp) 4031 // Floating point operations had to be 'fast' to enable the reduction. 
4032           TmpVec = addFastMathFlag(Builder.CreateBinOp(
4033               (Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx"));
4034         else
4035           TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind,
4036                                                         TmpVec, Shuf);
4037       }
4038
4039       // The result is in the first element of the vector.
4040       ReducedPartRdx =
4041           Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
4042
4043       // If the reduction can be performed in a smaller type, we need to extend
4044       // the reduction to the wider type before we branch to the original loop.
4045       if (Phi->getType() != RdxDesc.getRecurrenceType())
4046         ReducedPartRdx =
4047             RdxDesc.isSigned()
4048                 ? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
4049                 : Builder.CreateZExt(ReducedPartRdx, Phi->getType());
4050     }
4051
4052     // Create a phi node that merges control-flow from the backedge-taken check
4053     // block and the middle block.
4054     PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
4055                                           LoopScalarPreHeader->getTerminator());
4056     for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4057       BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4058     BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4059
4060     // Now, we need to fix the users of the reduction variable
4061     // inside and outside of the scalar remainder loop.
4062     // We know that the loop is in LCSSA form. We need to update the
4063     // PHI nodes in the exit blocks.
4064     for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
4065                               LEE = LoopExitBlock->end();
4066          LEI != LEE; ++LEI) {
4067       PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
4068       if (!LCSSAPhi)
4069         break;
4070
4071       // All PHI nodes need to have a single entry edge, or two if
4072       // we have already fixed them.
4073       assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
4074
4075       // We found a reduction value exit-PHI. Update it with the
4076       // incoming bypass edge.
4077       if (LCSSAPhi->getIncomingValue(0) == LoopExitInst)
4078         LCSSAPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4079     } // end of the LCSSA phi scan.
4080
4081     // Fix the scalar loop reduction variable with the incoming reduction sum
4082     // from the vector body and from the backedge value.
4083     int IncomingEdgeBlockIdx =
4084         Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4085     assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4086     // Pick the other block.
4087     int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4088     Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4089     Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4090   } // end of for each Phi in PHIsToFix.
4091
4092   // Update the dominator tree.
4093   //
4094   // FIXME: After creating the structure of the new loop, the dominator tree is
4095   //        no longer up-to-date, and it remains that way until we update it
4096   //        here. An out-of-date dominator tree is problematic for SCEV,
4097   //        because SCEVExpander uses it to guide code generation. The
4098   //        vectorizer uses SCEVExpander in several places. Instead, we should
4099   //        keep the dominator tree up-to-date as we go.
4100   updateAnalysis();
4101
4102   // Fix-up external users of the induction variables.
4103   for (auto &Entry : *Legal->getInductionVars())
4104     fixupIVUsers(Entry.first, Entry.second,
4105                  getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4106                  IVEndValues[Entry.first], LoopMiddleBlock);
4107
4108   fixLCSSAPHIs();
4109   predicateInstructions();
4110
4111   // Remove redundant induction instructions.
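  // (For instance, each unroll part can re-create an identical getelementptr
  // or extractelement instruction; cse() keeps one copy per basic block.)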
4112   cse(LoopVectorBody);
4113 }
4114 
4115 void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
4116 
4117   // This is the second phase of vectorizing first-order recurrences. An
4118   // overview of the transformation is described below. Suppose we have the
4119   // following loop.
4120   //
4121   //   for (int i = 0; i < n; ++i)
4122   //     b[i] = a[i] - a[i - 1];
4123   //
4124   // There is a first-order recurrence on "a". For this loop, the shorthand
4125   // scalar IR looks like:
4126   //
4127   //   scalar.ph:
4128   //     s_init = a[-1]
4129   //     br scalar.body
4130   //
4131   //   scalar.body:
4132   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4133   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4134   //     s2 = a[i]
4135   //     b[i] = s2 - s1
4136   //     br cond, scalar.body, ...
4137   //
4138   // In this example, s1 is a recurrence because its value depends on the
4139   // previous iteration. In the first phase of vectorization, we created a
4140   // temporary value for s1. We now complete the vectorization and produce the
4141   // shorthand vector IR shown below (for VF = 4, UF = 1).
4142   //
4143   //   vector.ph:
4144   //     v_init = vector(..., ..., ..., a[-1])
4145   //     br vector.body
4146   //
4147   //   vector.body
4148   //     i = phi [0, vector.ph], [i+4, vector.body]
4149   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4150   //     v2 = a[i, i+1, i+2, i+3];
4151   //     v3 = vector(v1(3), v2(0, 1, 2))
4152   //     b[i, i+1, i+2, i+3] = v2 - v3
4153   //     br cond, vector.body, middle.block
4154   //
4155   //   middle.block:
4156   //     x = v2(3)
4157   //     br scalar.ph
4158   //
4159   //   scalar.ph:
4160   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4161   //     br scalar.body
4162   //
4163   // After the vector loop finishes executing, we extract the next value of
4164   // the recurrence (x) to use as the initial value in the scalar loop.
4165 
4166   // Get the original loop preheader and single loop latch.
4167   auto *Preheader = OrigLoop->getLoopPreheader();
4168   auto *Latch = OrigLoop->getLoopLatch();
4169 
4170   // Get the initial and previous values of the scalar recurrence.
4171   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
4172   auto *Previous = Phi->getIncomingValueForBlock(Latch);
4173 
4174   // Create a vector from the initial value.
4175   auto *VectorInit = ScalarInit;
4176   if (VF > 1) {
4177     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
4178     VectorInit = Builder.CreateInsertElement(
4179         UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
4180         Builder.getInt32(VF - 1), "vector.recur.init");
4181   }
4182 
4183   // We constructed a temporary phi node in the first phase of vectorization.
4184   // This phi node will eventually be deleted.
4185   VectorParts &PhiParts = VectorLoopValueMap.getVector(Phi);
4186   Builder.SetInsertPoint(cast<Instruction>(PhiParts[0]));
4187 
4188   // Create a phi node for the new recurrence. The current value will either be
4189   // the initial value inserted into a vector or a loop-varying vector value.
4190   auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
4191   VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
4192 
4193   // Get the vectorized previous value. We ensured the previous value was an
4194   // instruction when detecting the recurrence.
4195   auto &PreviousParts = getVectorValue(Previous);
4196 
4197   // Set the insertion point to be after this instruction. We ensured the
4198   // previous value dominated all uses of the phi when detecting the
4199   // recurrence.
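  //
  // (An illustrative sketch, assuming VF = 4 and UF = 1: the recurrence phi
  // holds the previous iteration's vector <x0, x1, x2, x3>, and the previous
  // value's part holds <x4, x5, x6, x7>. The shuffle mask built below is
  // <3, 4, 5, 6>, which combines the last lane of the phi with the first
  // three lanes of the current part to form <x3, x4, x5, x6>, the value of
  // the recurrence in each lane.)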
4200   Builder.SetInsertPoint(
4201       &*++BasicBlock::iterator(cast<Instruction>(PreviousParts[UF - 1])));
4202 
4203   // We will construct a vector for the recurrence by combining the values for
4204   // the current and previous iterations. This is the required shuffle mask.
4205   SmallVector<Constant *, 8> ShuffleMask(VF);
4206   ShuffleMask[0] = Builder.getInt32(VF - 1);
4207   for (unsigned I = 1; I < VF; ++I)
4208     ShuffleMask[I] = Builder.getInt32(I + VF - 1);
4209 
4210   // The vector from which to take the initial value for the current iteration
4211   // (actual or unrolled). Initially, this is the vector phi node.
4212   Value *Incoming = VecPhi;
4213 
4214   // Shuffle the current and previous vector and update the vector parts.
4215   for (unsigned Part = 0; Part < UF; ++Part) {
4216     auto *Shuffle =
4217         VF > 1
4218             ? Builder.CreateShuffleVector(Incoming, PreviousParts[Part],
4219                                           ConstantVector::get(ShuffleMask))
4220             : Incoming;
4221     PhiParts[Part]->replaceAllUsesWith(Shuffle);
4222     cast<Instruction>(PhiParts[Part])->eraseFromParent();
4223     PhiParts[Part] = Shuffle;
4224     Incoming = PreviousParts[Part];
4225   }
4226 
4227   // Fix the latch value of the new recurrence in the vector loop.
4228   VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
4229 
4230   // Extract the last vector element in the middle block. This will be the
4231   // initial value for the recurrence when jumping to the scalar loop.
4232   auto *Extract = Incoming;
4233   if (VF > 1) {
4234     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4235     Extract = Builder.CreateExtractElement(Extract, Builder.getInt32(VF - 1),
4236                                            "vector.recur.extract");
4237   }
4238 
4239   // Fix the initial value of the original recurrence in the scalar loop.
4240   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4241   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4242   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4243     auto *Incoming = BB == LoopMiddleBlock ? Extract : ScalarInit;
4244     Start->addIncoming(Incoming, BB);
4245   }
4246 
4247   Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
4248   Phi->setName("scalar.recur");
4249 
4250   // Finally, fix users of the recurrence outside the loop. The users will need
4251   // either the last value of the scalar recurrence or the last value of the
4252   // vector recurrence we extracted in the middle block. Since the loop is in
4253   // LCSSA form, we just need to find the phi node for the original scalar
4254   // recurrence in the exit block, and then add an edge for the middle block.
4255   for (auto &I : *LoopExitBlock) {
4256     auto *LCSSAPhi = dyn_cast<PHINode>(&I);
4257     if (!LCSSAPhi)
4258       break;
4259     if (LCSSAPhi->getIncomingValue(0) == Phi) {
4260       LCSSAPhi->addIncoming(Extract, LoopMiddleBlock);
4261       break;
4262     }
4263   }
4264 }
4265 
4266 void InnerLoopVectorizer::fixLCSSAPHIs() {
4267   for (Instruction &LEI : *LoopExitBlock) {
4268     auto *LCSSAPhi = dyn_cast<PHINode>(&LEI);
4269     if (!LCSSAPhi)
4270       break;
4271     if (LCSSAPhi->getNumIncomingValues() == 1)
4272       LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()),
4273                             LoopMiddleBlock);
4274   }
4275 }
4276 
4277 void InnerLoopVectorizer::collectTriviallyDeadInstructions() {
4278   BasicBlock *Latch = OrigLoop->getLoopLatch();
4279 
4280   // We create new control-flow for the vectorized loop, so the original
4281   // condition will be dead after vectorization if it's only used by the
4282   // branch.
4283   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
4284   if (Cmp && Cmp->hasOneUse())
4285     DeadInstructions.insert(Cmp);
4286 
4287   // We create new "steps" for induction variable updates to which the original
4288   // induction variables map. An original update instruction will be dead if
4289   // all its users except the induction variable are dead.
4290   for (auto &Induction : *Legal->getInductionVars()) {
4291     PHINode *Ind = Induction.first;
4292     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4293     if (all_of(IndUpdate->users(), [&](User *U) -> bool {
4294           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
4295         }))
4296       DeadInstructions.insert(IndUpdate);
4297   }
4298 }
4299 
4300 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4301 
4302   // The basic block and loop containing the predicated instruction.
4303   auto *PredBB = PredInst->getParent();
4304   auto *VectorLoop = LI->getLoopFor(PredBB);
4305 
4306   // Initialize a worklist with the operands of the predicated instruction.
4307   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4308 
4309   // Holds instructions that we need to analyze again. An instruction may be
4310   // reanalyzed if we don't yet know if we can sink it or not.
4311   SmallVector<Instruction *, 8> InstsToReanalyze;
4312 
4313   // Returns true if a given use occurs in the predicated block. Phi nodes use
4314   // their operands in their corresponding predecessor blocks.
4315   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4316     auto *I = cast<Instruction>(U.getUser());
4317     BasicBlock *BB = I->getParent();
4318     if (auto *Phi = dyn_cast<PHINode>(I))
4319       BB = Phi->getIncomingBlock(
4320           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4321     return BB == PredBB;
4322   };
4323 
4324   // Iteratively sink the scalarized operands of the predicated instruction
4325   // into the block we created for it. When an instruction is sunk, its
4326   // operands are then added to the worklist. The algorithm ends once a pass
4327   // through the worklist fails to sink a single instruction.
4328   bool Changed;
4329   do {
4330 
4331     // Add the instructions that need to be reanalyzed to the worklist, and
4332     // reset the changed indicator.
4333     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4334     InstsToReanalyze.clear();
4335     Changed = false;
4336 
4337     while (!Worklist.empty()) {
4338       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4339 
4340       // We can't sink an instruction if it is a phi node, is already in the
4341       // predicated block, is not in the loop, or may have side effects.
4342       if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
4343           !VectorLoop->contains(I) || I->mayHaveSideEffects())
4344         continue;
4345 
4346       // It's legal to sink the instruction if all its uses occur in the
4347       // predicated block. Otherwise, there's nothing to do yet, and we may
4348       // need to reanalyze the instruction.
4349       if (!all_of(I->uses(), isBlockOfUsePredicated)) {
4350         InstsToReanalyze.push_back(I);
4351         continue;
4352       }
4353 
4354       // Move the instruction to the beginning of the predicated block, and add
4355       // its operands to the worklist.
4356       I->moveBefore(&*PredBB->getFirstInsertionPt());
4357       Worklist.insert(I->op_begin(), I->op_end());
4358 
4359       // The sinking may have enabled other instructions to be sunk, so we will
4360       // need to iterate.
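      // (Termination note: each pass of the enclosing do-while either sinks
      // at least one instruction into PredBB, where it is skipped from then
      // on, or leaves Changed false; since the loop holds finitely many
      // instructions, the iteration must terminate.)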
4361       Changed = true;
4362     }
4363   } while (Changed);
4364 }
4365 
4366 void InnerLoopVectorizer::predicateInstructions() {
4367 
4368   // For each instruction I marked for predication on value C, split I into its
4369   // own basic block to form an if-then construct over C. Since I may be fed by
4370   // an extractelement instruction or other scalar operand, we try to
4371   // iteratively sink its scalar operands into the predicated block. If I feeds
4372   // an insertelement instruction, we try to move this instruction into the
4373   // predicated block as well. For non-void types, a phi node will be created
4374   // for the resulting value (either vector or scalar).
4375   //
4376   // So for some predicated instruction, e.g. the conditional sdiv in:
4377   //
4378   // for.body:
4379   //  ...
4380   //  %add = add nsw i32 %mul, %0
4381   //  %cmp5 = icmp sgt i32 %2, 7
4382   //  br i1 %cmp5, label %if.then, label %if.end
4383   //
4384   // if.then:
4385   //  %div = sdiv i32 %0, %1
4386   //  br label %if.end
4387   //
4388   // if.end:
4389   //  %x.0 = phi i32 [ %div, %if.then ], [ %add, %for.body ]
4390   //
4391   // the sdiv at this point is scalarized and if-converted using a select.
4392   // The inactive elements in the vector are not used, but the predicated
4393   // instruction is still executed for all vector elements, essentially:
4394   //
4395   // vector.body:
4396   //  ...
4397   //  %17 = add nsw <2 x i32> %16, %wide.load
4398   //  %29 = extractelement <2 x i32> %wide.load, i32 0
4399   //  %30 = extractelement <2 x i32> %wide.load51, i32 0
4400   //  %31 = sdiv i32 %29, %30
4401   //  %32 = insertelement <2 x i32> undef, i32 %31, i32 0
4402   //  %35 = extractelement <2 x i32> %wide.load, i32 1
4403   //  %36 = extractelement <2 x i32> %wide.load51, i32 1
4404   //  %37 = sdiv i32 %35, %36
4405   //  %38 = insertelement <2 x i32> %32, i32 %37, i32 1
4406   //  %predphi = select <2 x i1> %26, <2 x i32> %38, <2 x i32> %17
4407   //
4408   // Predication will now re-introduce the original control flow to avoid false
4409   // side-effects by the sdiv instructions on the inactive elements, yielding
4410   // (after cleanup):
4411   //
4412   // vector.body:
4413   //  ...
4414   //  %5 = add nsw <2 x i32> %4, %wide.load
4415   //  %8 = icmp sgt <2 x i32> %wide.load52, <i32 7, i32 7>
4416   //  %9 = extractelement <2 x i1> %8, i32 0
4417   //  br i1 %9, label %pred.sdiv.if, label %pred.sdiv.continue
4418   //
4419   // pred.sdiv.if:
4420   //  %10 = extractelement <2 x i32> %wide.load, i32 0
4421   //  %11 = extractelement <2 x i32> %wide.load51, i32 0
4422   //  %12 = sdiv i32 %10, %11
4423   //  %13 = insertelement <2 x i32> undef, i32 %12, i32 0
4424   //  br label %pred.sdiv.continue
4425   //
4426   // pred.sdiv.continue:
4427   //  %14 = phi <2 x i32> [ undef, %vector.body ], [ %13, %pred.sdiv.if ]
4428   //  %15 = extractelement <2 x i1> %8, i32 1
4429   //  br i1 %15, label %pred.sdiv.if54, label %pred.sdiv.continue55
4430   //
4431   // pred.sdiv.if54:
4432   //  %16 = extractelement <2 x i32> %wide.load, i32 1
4433   //  %17 = extractelement <2 x i32> %wide.load51, i32 1
4434   //  %18 = sdiv i32 %16, %17
4435   //  %19 = insertelement <2 x i32> %14, i32 %18, i32 1
4436   //  br label %pred.sdiv.continue55
4437   //
4438   // pred.sdiv.continue55:
4439   //  %20 = phi <2 x i32> [ %14, %pred.sdiv.continue ], [ %19, %pred.sdiv.if54 ]
4440   //  %predphi = select <2 x i1> %8, <2 x i32> %20, <2 x i32> %5
4441 
4442   for (auto KV : PredicatedInstructions) {
4443     BasicBlock::iterator I(KV.first);
4444     BasicBlock *Head = I->getParent();
4445     auto *BB = SplitBlock(Head, &*std::next(I), DT, LI);
4446     auto *T = SplitBlockAndInsertIfThen(KV.second, &*I, /*Unreachable=*/false,
4447                                         /*BranchWeights=*/nullptr, DT, LI);
4448     I->moveBefore(T);
4449     sinkScalarOperands(&*I);
4450 
4451     I->getParent()->setName(Twine("pred.") + I->getOpcodeName() + ".if");
4452     BB->setName(Twine("pred.") + I->getOpcodeName() + ".continue");
4453 
4454     // If the instruction is non-void, create a phi node at the reconvergence point.
4455     if (!I->getType()->isVoidTy()) {
4456       Value *IncomingTrue = nullptr;
4457       Value *IncomingFalse = nullptr;
4458 
4459       if (I->hasOneUse() && isa<InsertElementInst>(*I->user_begin())) {
4460         // If the predicated instruction is feeding an insert-element, move it
4461         // into the Then block; a phi node will be created for the vector.
4462         InsertElementInst *IEI = cast<InsertElementInst>(*I->user_begin());
4463         IEI->moveBefore(T);
4464         IncomingTrue = IEI; // the new vector with the inserted element.
4465         IncomingFalse = IEI->getOperand(0); // the unmodified vector
4466       } else {
4467         // A phi node will be created for the scalar predicated instruction.
4468         IncomingTrue = &*I;
4469         IncomingFalse = UndefValue::get(I->getType());
4470       }
4471 
4472       BasicBlock *PostDom = I->getParent()->getSingleSuccessor();
4473       assert(PostDom && "Then block has multiple successors");
4474       PHINode *Phi =
4475           PHINode::Create(IncomingTrue->getType(), 2, "", &PostDom->front());
4476       IncomingTrue->replaceAllUsesWith(Phi);
4477       Phi->addIncoming(IncomingFalse, Head);
4478       Phi->addIncoming(IncomingTrue, I->getParent());
4479     }
4480   }
4481 
4482   DEBUG(DT->verifyDomTree());
4483 }
4484 
4485 InnerLoopVectorizer::VectorParts
4486 InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
4487   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
4488 
4489   // Look for cached value.
4490   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
4491   EdgeMaskCache::iterator ECEntryIt = MaskCache.find(Edge);
4492   if (ECEntryIt != MaskCache.end())
4493     return ECEntryIt->second;
4494 
4495   VectorParts SrcMask = createBlockInMask(Src);
4496 
4497   // The terminator has to be a branch inst!
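  // (Loops that reach the vectorizer have already passed the legality check
  // rejecting non-branch terminators such as switches, so the dyn_cast below
  // is expected to succeed.)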
4498   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
4499   assert(BI && "Unexpected terminator found");
4500 
4501   if (BI->isConditional()) {
4502     VectorParts EdgeMask = getVectorValue(BI->getCondition());
4503 
4504     if (BI->getSuccessor(0) != Dst)
4505       for (unsigned part = 0; part < UF; ++part)
4506         EdgeMask[part] = Builder.CreateNot(EdgeMask[part]);
4507 
4508     for (unsigned part = 0; part < UF; ++part)
4509       EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]);
4510 
4511     MaskCache[Edge] = EdgeMask;
4512     return EdgeMask;
4513   }
4514 
4515   MaskCache[Edge] = SrcMask;
4516   return SrcMask;
4517 }
4518 
4519 InnerLoopVectorizer::VectorParts
4520 InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
4521   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
4522 
4523   // The loop's incoming mask is all-one.
4524   if (OrigLoop->getHeader() == BB) {
4525     Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1);
4526     return getVectorValue(C);
4527   }
4528 
4529   // This is the block mask. We OR the masks of all incoming edges together, starting with zero.
4530   Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0);
4531   VectorParts BlockMask = getVectorValue(Zero);
4532 
4533   // For each pred:
4534   for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) {
4535     VectorParts EM = createEdgeMask(*it, BB);
4536     for (unsigned part = 0; part < UF; ++part)
4537       BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]);
4538   }
4539 
4540   return BlockMask;
4541 }
4542 
4543 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
4544                                               unsigned VF, PhiVector *PV) {
4545   PHINode *P = cast<PHINode>(PN);
4546   // Handle recurrences.
4547   if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
4548     VectorParts Entry(UF);
4549     for (unsigned part = 0; part < UF; ++part) {
4550       // This is phase one of vectorizing PHIs.
4551       Type *VecTy =
4552           (VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
4553       Entry[part] = PHINode::Create(
4554           VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
4555     }
4556     VectorLoopValueMap.initVector(P, Entry);
4557     PV->push_back(P);
4558     return;
4559   }
4560 
4561   setDebugLocFromInst(Builder, P);
4562   // Check for PHI nodes that are lowered to vector selects.
4563   if (P->getParent() != OrigLoop->getHeader()) {
4564     // We know that all PHIs in non-header blocks are converted into
4565     // selects, so we don't have to worry about the insertion order and we
4566     // can just use the builder.
4567     // At this point we generate the predication tree. There may be
4568     // duplications since this is a simple recursive scan, but future
4569     // optimizations will clean it up.
4570 
4571     unsigned NumIncoming = P->getNumIncomingValues();
4572 
4573     // Generate a sequence of selects of the form:
4574     // SELECT(Mask3, In3,
4575     //        SELECT(Mask2, In2,
4576     //               ( ...)))
4577     VectorParts Entry(UF);
4578     for (unsigned In = 0; In < NumIncoming; In++) {
4579       VectorParts Cond =
4580           createEdgeMask(P->getIncomingBlock(In), P->getParent());
4581       const VectorParts &In0 = getVectorValue(P->getIncomingValue(In));
4582 
4583       for (unsigned part = 0; part < UF; ++part) {
4584         // We might have single edge PHIs (blocks) - use an identity
4585         // 'select' for the first PHI operand.
4586         if (In == 0)
4587           Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In0[part]);
4588         else
4589           // Select between the current value and the previous incoming edge
4590           // based on the incoming mask.
4591           Entry[part] = Builder.CreateSelect(Cond[part], In0[part], Entry[part],
4592                                              "predphi");
4593       }
4594     }
4595     VectorLoopValueMap.initVector(P, Entry);
4596     return;
4597   }
4598 
4599   // This PHINode must be an induction variable.
4600   // Make sure that we know about it.
4601   assert(Legal->getInductionVars()->count(P) && "Not an induction variable");
4602 
4603   InductionDescriptor II = Legal->getInductionVars()->lookup(P);
4604   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4605 
4606   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4607   // which can be found from the original scalar operations.
4608   switch (II.getKind()) {
4609   case InductionDescriptor::IK_NoInduction:
4610     llvm_unreachable("Unknown induction");
4611   case InductionDescriptor::IK_IntInduction:
4612     return widenIntInduction(P);
4613   case InductionDescriptor::IK_PtrInduction: {
4614     // Handle the pointer induction variable case.
4615     assert(P->getType()->isPointerTy() && "Unexpected type.");
4616     // This is the normalized GEP that starts counting at zero.
4617     Value *PtrInd = Induction;
4618     PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
4619     // Determine the number of scalars we need to generate for each unroll
4620     // iteration. If the instruction is uniform, we only need to generate the
4621     // first lane. Otherwise, we generate all VF values.
4622     unsigned Lanes = Legal->isUniformAfterVectorization(P) ? 1 : VF;
4623     // These are the scalar results. Notice that we don't generate vector GEPs
4624     // because scalar GEPs result in better code.
4625     ScalarParts Entry(UF);
4626     for (unsigned Part = 0; Part < UF; ++Part) {
4627       Entry[Part].resize(VF);
4628       for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4629         Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
4630         Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4631         Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
4632         SclrGep->setName("next.gep");
4633         Entry[Part][Lane] = SclrGep;
4634       }
4635     }
4636     VectorLoopValueMap.initScalar(P, Entry);
4637     return;
4638   }
4639   case InductionDescriptor::IK_FpInduction: {
4640     assert(P->getType() == II.getStartValue()->getType() &&
4641            "Types must match");
4642     // Handle other induction variables that are now based on the
4643     // canonical one.
4644     assert(P != OldInduction && "Primary induction can be integer only");
4645 
4646     Value *V = Builder.CreateCast(Instruction::SIToFP, Induction, P->getType());
4647     V = II.transform(Builder, V, PSE.getSE(), DL);
4648     V->setName("fp.offset.idx");
4649 
4650     // Now we have scalar op: %fp.offset.idx = StartVal +/- Induction*StepVal
4651 
4652     Value *Broadcasted = getBroadcastInstrs(V);
4653     // After broadcasting the induction variable we need to make the vector
4654     // consecutive by adding StepVal*0, StepVal*1, StepVal*2, etc.
4655     Value *StepVal = cast<SCEVUnknown>(II.getStep())->getValue();
4656     VectorParts Entry(UF);
4657     for (unsigned part = 0; part < UF; ++part)
4658       Entry[part] = getStepVector(Broadcasted, VF * part, StepVal,
4659                                   II.getInductionOpcode());
4660     VectorLoopValueMap.initVector(P, Entry);
4661     return;
4662   }
4663   }
4664 }
4665 
4666 /// A helper function for checking whether an integer division-related
4667 /// instruction may divide by zero (in which case it must be predicated if
4668 /// executed conditionally in the scalar code).
4669 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4670 /// Non-zero divisors that are not compile-time constants will not be
4671 /// converted into multiplication, so we will still end up scalarizing
4672 /// the division, but can do so w/o predication.
4673 static bool mayDivideByZero(Instruction &I) {
4674   assert((I.getOpcode() == Instruction::UDiv ||
4675           I.getOpcode() == Instruction::SDiv ||
4676           I.getOpcode() == Instruction::URem ||
4677           I.getOpcode() == Instruction::SRem) &&
4678          "Unexpected instruction");
4679   Value *Divisor = I.getOperand(1);
4680   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4681   return !CInt || CInt->isZero();
4682 }
4683 
4684 void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
4685   // For each instruction in the old loop.
4686   for (Instruction &I : *BB) {
4687 
4688     // If the instruction will become trivially dead when vectorized, we don't
4689     // need to generate it.
4690     if (DeadInstructions.count(&I))
4691       continue;
4692 
4693     // Scalarize instructions that should remain scalar after vectorization.
4694     if (VF > 1 &&
4695         !(isa<BranchInst>(&I) || isa<PHINode>(&I) ||
4696           isa<DbgInfoIntrinsic>(&I)) &&
4697         shouldScalarizeInstruction(&I)) {
4698       scalarizeInstruction(&I, Legal->isScalarWithPredication(&I));
4699       continue;
4700     }
4701 
4702     switch (I.getOpcode()) {
4703     case Instruction::Br:
4704       // Nothing to do for PHIs and BR, since we already took care of the
4705       // loop control flow instructions.
4706       continue;
4707     case Instruction::PHI: {
4708       // Vectorize PHINodes.
4709       widenPHIInstruction(&I, UF, VF, PV);
4710       continue;
4711     } // End of PHI.
4712 
4713     case Instruction::UDiv:
4714     case Instruction::SDiv:
4715     case Instruction::SRem:
4716     case Instruction::URem:
4717       // Scalarize with predication if this instruction may divide by zero and
4718       // block execution is conditional, otherwise fall through.
4719       if (Legal->isScalarWithPredication(&I)) {
4720         scalarizeInstruction(&I, true);
4721         continue;
4722       }
4723     case Instruction::Add:
4724     case Instruction::FAdd:
4725     case Instruction::Sub:
4726     case Instruction::FSub:
4727     case Instruction::Mul:
4728     case Instruction::FMul:
4729     case Instruction::FDiv:
4730     case Instruction::FRem:
4731     case Instruction::Shl:
4732     case Instruction::LShr:
4733     case Instruction::AShr:
4734     case Instruction::And:
4735     case Instruction::Or:
4736     case Instruction::Xor: {
4737       // Just widen binops.
4738       auto *BinOp = cast<BinaryOperator>(&I);
4739       setDebugLocFromInst(Builder, BinOp);
4740       const VectorParts &A = getVectorValue(BinOp->getOperand(0));
4741       const VectorParts &B = getVectorValue(BinOp->getOperand(1));
4742 
4743       // Use this vector value for all users of the original instruction.
4744       VectorParts Entry(UF);
4745       for (unsigned Part = 0; Part < UF; ++Part) {
4746         Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]);
4747 
4748         if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
4749           VecOp->copyIRFlags(BinOp);
4750 
4751         Entry[Part] = V;
4752       }
4753 
4754       VectorLoopValueMap.initVector(&I, Entry);
4755       addMetadata(Entry, BinOp);
4756       break;
4757     }
4758     case Instruction::Select: {
4759       // Widen selects.
4760       // If the selector is loop invariant we can create a select
4761       // instruction with a scalar condition. Otherwise, use vector-select.
4762       auto *SE = PSE.getSE();
4763       bool InvariantCond =
4764           SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
4765       setDebugLocFromInst(Builder, &I);
4766 
4767       // The condition can be loop invariant but still defined inside the
4768       // loop. This means that we can't just use the original 'cond' value.
4769       // We have to take the 'vectorized' value and pick the first lane.
4770       // Instcombine will make this a no-op.
4771       const VectorParts &Cond = getVectorValue(I.getOperand(0));
4772       const VectorParts &Op0 = getVectorValue(I.getOperand(1));
4773       const VectorParts &Op1 = getVectorValue(I.getOperand(2));
4774 
4775       auto *ScalarCond = getScalarValue(I.getOperand(0), 0, 0);
4776 
4777       VectorParts Entry(UF);
4778       for (unsigned Part = 0; Part < UF; ++Part) {
4779         Entry[Part] = Builder.CreateSelect(
4780             InvariantCond ? ScalarCond : Cond[Part], Op0[Part], Op1[Part]);
4781       }
4782 
4783       VectorLoopValueMap.initVector(&I, Entry);
4784       addMetadata(Entry, &I);
4785       break;
4786     }
4787 
4788     case Instruction::ICmp:
4789     case Instruction::FCmp: {
4790       // Widen compares. Generate vector compares.
4791       bool FCmp = (I.getOpcode() == Instruction::FCmp);
4792       auto *Cmp = dyn_cast<CmpInst>(&I);
4793       setDebugLocFromInst(Builder, Cmp);
4794       const VectorParts &A = getVectorValue(Cmp->getOperand(0));
4795       const VectorParts &B = getVectorValue(Cmp->getOperand(1));
4796       VectorParts Entry(UF);
4797       for (unsigned Part = 0; Part < UF; ++Part) {
4798         Value *C = nullptr;
4799         if (FCmp) {
4800           C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
4801           cast<FCmpInst>(C)->copyFastMathFlags(Cmp);
4802         } else {
4803           C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]);
4804         }
4805         Entry[Part] = C;
4806       }
4807 
4808       VectorLoopValueMap.initVector(&I, Entry);
4809       addMetadata(Entry, &I);
4810       break;
4811     }
4812 
4813     case Instruction::Store:
4814     case Instruction::Load:
4815       vectorizeMemoryInstruction(&I);
4816       break;
4817     case Instruction::ZExt:
4818     case Instruction::SExt:
4819     case Instruction::FPToUI:
4820     case Instruction::FPToSI:
4821     case Instruction::FPExt:
4822     case Instruction::PtrToInt:
4823     case Instruction::IntToPtr:
4824     case Instruction::SIToFP:
4825     case Instruction::UIToFP:
4826     case Instruction::Trunc:
4827     case Instruction::FPTrunc:
4828     case Instruction::BitCast: {
4829       auto *CI = dyn_cast<CastInst>(&I);
4830       setDebugLocFromInst(Builder, CI);
4831 
4832       // Optimize the special case where the source is a constant integer
4833       // induction variable. Notice that we can only optimize the 'trunc' case
4834       // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
4835       // (c) other casts depend on pointer size.
4836       auto ID = Legal->getInductionVars()->lookup(OldInduction);
4837       if (isa<TruncInst>(CI) && CI->getOperand(0) == OldInduction &&
4838           ID.getConstIntStepValue()) {
4839         widenIntInduction(OldInduction, cast<TruncInst>(CI));
4840         break;
4841       }
4842 
4843       // Vectorize casts.
4844       Type *DestTy =
4845           (VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
4846 
4847       const VectorParts &A = getVectorValue(CI->getOperand(0));
4848       VectorParts Entry(UF);
4849       for (unsigned Part = 0; Part < UF; ++Part)
4850         Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy);
4851       VectorLoopValueMap.initVector(&I, Entry);
4852       addMetadata(Entry, &I);
4853       break;
4854     }
4855 
4856     case Instruction::Call: {
4857       // Ignore dbg intrinsics.
4858       if (isa<DbgInfoIntrinsic>(I))
4859         break;
4860       setDebugLocFromInst(Builder, &I);
4861 
4862       Module *M = BB->getParent()->getParent();
4863       auto *CI = cast<CallInst>(&I);
4864 
4865       StringRef FnName = CI->getCalledFunction()->getName();
4866       Function *F = CI->getCalledFunction();
4867       Type *RetTy = ToVectorTy(CI->getType(), VF);
4868       SmallVector<Type *, 4> Tys;
4869       for (Value *ArgOperand : CI->arg_operands())
4870         Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
4871 
4872       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4873       if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
4874                  ID == Intrinsic::lifetime_start)) {
4875         scalarizeInstruction(&I);
4876         break;
4877       }
4878       // This flag shows whether we should use an intrinsic or a library call
4879       // for the vectorized version of the instruction.
4880       // Is it more beneficial to perform an intrinsic call than a lib call?
4881       bool NeedToScalarize;
4882       unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
4883       bool UseVectorIntrinsic =
4884           ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
4885       if (!UseVectorIntrinsic && NeedToScalarize) {
4886         scalarizeInstruction(&I);
4887         break;
4888       }
4889 
4890       VectorParts Entry(UF);
4891       for (unsigned Part = 0; Part < UF; ++Part) {
4892         SmallVector<Value *, 4> Args;
4893         for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
4894           Value *Arg = CI->getArgOperand(i);
4895           // Some intrinsics have a scalar argument - don't replace it with a
4896           // vector.
4897           if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i)) {
4898             const VectorParts &VectorArg = getVectorValue(CI->getArgOperand(i));
4899             Arg = VectorArg[Part];
4900           }
4901           Args.push_back(Arg);
4902         }
4903 
4904         Function *VectorF;
4905         if (UseVectorIntrinsic) {
4906           // Use vector version of the intrinsic.
4907           Type *TysForDecl[] = {CI->getType()};
4908           if (VF > 1)
4909             TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4910           VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4911         } else {
4912           // Use vector version of the library call.
4913           StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
4914           assert(!VFnName.empty() && "Vector function name is empty.");
4915           VectorF = M->getFunction(VFnName);
4916           if (!VectorF) {
4917             // Generate a declaration.
4918             FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
4919             VectorF =
4920                 Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
4921             VectorF->copyAttributesFrom(F);
4922           }
4923         }
4924         assert(VectorF && "Can't create vector function.");
4925 
4926         SmallVector<OperandBundleDef, 1> OpBundles;
4927         CI->getOperandBundlesAsDefs(OpBundles);
4928         CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4929 
4930         if (isa<FPMathOperator>(V))
4931           V->copyFastMathFlags(CI);
4932 
4933         Entry[Part] = V;
4934       }
4935 
4936       VectorLoopValueMap.initVector(&I, Entry);
4937       addMetadata(Entry, &I);
4938       break;
4939     }
4940 
4941     default:
4942       // All other instructions are unsupported. Scalarize them.
4943       scalarizeInstruction(&I);
4944       break;
4945     } // end of switch.
4946   } // end of for_each instr.
4947 }
4948 
4949 void InnerLoopVectorizer::updateAnalysis() {
4950   // Forget the original basic block.
4951   PSE.getSE()->forgetLoop(OrigLoop);
4952 
4953   // Update the dominator tree information.
4954   assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
4955          "Entry does not dominate exit.");
4956 
4957   // We don't predicate stores by this point, so the vector body should be a
4958   // single loop.
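  // (The updates below record that the vector body is dominated by the vector
  // preheader, the middle block by the vector body, the scalar preheader and
  // the exit block by the first bypass block, and the scalar body by the
  // scalar preheader.)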
4959   DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader);
4960 
4961   DT->addNewBlock(LoopMiddleBlock, LoopVectorBody);
4962   DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
4963   DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
4964   DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
4965 
4966   DEBUG(DT->verifyDomTree());
4967 }
4968 
4969 /// \brief Check whether it is safe to if-convert this phi node.
4970 ///
4971 /// Phi nodes with constant expressions that can trap are not safe to if
4972 /// convert.
4973 static bool canIfConvertPHINodes(BasicBlock *BB) {
4974   for (Instruction &I : *BB) {
4975     auto *Phi = dyn_cast<PHINode>(&I);
4976     if (!Phi)
4977       return true;
4978     for (Value *V : Phi->incoming_values())
4979       if (auto *C = dyn_cast<Constant>(V))
4980         if (C->canTrap())
4981           return false;
4982   }
4983   return true;
4984 }
4985 
4986 bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
4987   if (!EnableIfConversion) {
4988     ORE->emit(createMissedAnalysis("IfConversionDisabled")
4989               << "if-conversion is disabled");
4990     return false;
4991   }
4992 
4993   assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
4994 
4995   // A list of pointers that we can safely read and write to.
4996   SmallPtrSet<Value *, 8> SafePointers;
4997 
4998   // Collect safe addresses.
4999   for (BasicBlock *BB : TheLoop->blocks()) {
5000     if (blockNeedsPredication(BB))
5001       continue;
5002 
5003     for (Instruction &I : *BB)
5004       if (auto *Ptr = getPointerOperand(&I))
5005         SafePointers.insert(Ptr);
5006   }
5007 
5008   // Collect the blocks that need predication.
5009   BasicBlock *Header = TheLoop->getHeader();
5010   for (BasicBlock *BB : TheLoop->blocks()) {
5011     // We don't support switch statements inside loops.
5012     if (!isa<BranchInst>(BB->getTerminator())) {
5013       ORE->emit(createMissedAnalysis("LoopContainsSwitch", BB->getTerminator())
5014                 << "loop contains a switch statement");
5015       return false;
5016     }
5017 
5018     // We must be able to predicate all blocks that need to be predicated.
5019     if (blockNeedsPredication(BB)) {
5020       if (!blockCanBePredicated(BB, SafePointers)) {
5021         ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
5022                   << "control flow cannot be substituted for a select");
5023         return false;
5024       }
5025     } else if (BB != Header && !canIfConvertPHINodes(BB)) {
5026       ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
5027                 << "control flow cannot be substituted for a select");
5028       return false;
5029     }
5030   }
5031 
5032   // We can if-convert this loop.
5033   return true;
5034 }
5035 
5036 bool LoopVectorizationLegality::canVectorize() {
5037   // We must have a loop in canonical form. Loops with indirectbr in them cannot
5038   // be canonicalized.
5039   if (!TheLoop->getLoopPreheader()) {
5040     ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5041               << "loop control flow is not understood by vectorizer");
5042     return false;
5043   }
5044 
5045   // FIXME: The code is currently dead, since any loop that gets sent to
5046   // LoopVectorizationLegality is already an innermost loop.
5047   //
5048   // We can only vectorize innermost loops.
5049   if (!TheLoop->empty()) {
5050     ORE->emit(createMissedAnalysis("NotInnermostLoop")
5051               << "loop is not the innermost loop");
5052     return false;
5053   }
5054 
5055   // We must have a single backedge.
5056   if (TheLoop->getNumBackEdges() != 1) {
5057     ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5058               << "loop control flow is not understood by vectorizer");
5059     return false;
5060   }
5061 
5062   // We must have a single exiting block.
5063   if (!TheLoop->getExitingBlock()) {
5064     ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5065               << "loop control flow is not understood by vectorizer");
5066     return false;
5067   }
5068 
5069   // We only handle bottom-tested loops, i.e. loops in which the condition is
5070   // checked at the end of each iteration. With that we can assume that all
5071   // instructions in the loop are executed the same number of times.
5072   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5073     ORE->emit(createMissedAnalysis("CFGNotUnderstood")
5074               << "loop control flow is not understood by vectorizer");
5075     return false;
5076   }
5077 
5078   // We need to have a loop header.
5079   DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
5080                << '\n');
5081 
5082   // Check if we can if-convert non-single-bb loops.
5083   unsigned NumBlocks = TheLoop->getNumBlocks();
5084   if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
5085     DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
5086     return false;
5087   }
5088 
5089   // ScalarEvolution needs to be able to find the exit count.
5090   const SCEV *ExitCount = PSE.getBackedgeTakenCount();
5091   if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
5092     ORE->emit(createMissedAnalysis("CantComputeNumberOfIterations")
5093               << "could not determine number of loop iterations");
5094     DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
5095     return false;
5096   }
5097 
5098   // Check if we can vectorize the instructions and CFG in this loop.
5099   if (!canVectorizeInstrs()) {
5100     DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
5101     return false;
5102   }
5103 
5104   // Go over each instruction and look at memory deps.
5105   if (!canVectorizeMemory()) {
5106     DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
5107     return false;
5108   }
5109 
5110   DEBUG(dbgs() << "LV: We can vectorize this loop"
5111                << (LAI->getRuntimePointerChecking()->Need
5112                        ? " (with a runtime bound check)"
5113                        : "")
5114                << "!\n");
5115 
5116   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
5117 
5118   // If an override option has been passed in for interleaved accesses, use it.
5119   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
5120     UseInterleaved = EnableInterleavedMemAccesses;
5121 
5122   // Analyze interleaved memory accesses.
5123   if (UseInterleaved)
5124     InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());
5125 
5126   // Collect all instructions that are known to be uniform after vectorization.
5127   collectLoopUniforms();
5128 
5129   // Collect all instructions that are known to be scalar after vectorization.
5130   collectLoopScalars();
5131 
5132   unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
5133   if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
5134     SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;
5135 
5136   if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
5137     ORE->emit(createMissedAnalysis("TooManySCEVRunTimeChecks")
5138               << "Too many SCEV assumptions need to be made and checked "
5139               << "at runtime");
5140     DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
5141     return false;
5142   }
5143 
5144   // Okay! We can vectorize. At this point we don't have any other memory analysis
5145   // which may limit our maximum vectorization factor, so just return true with
5146   // no restrictions.
5147   return true;
5148 }
5149 
5150 static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
5151   if (Ty->isPointerTy())
5152     return DL.getIntPtrType(Ty);
5153 
5154   // It is possible that chars or shorts overflow when we ask for the loop's
5155   // trip count; work around this by changing the type size.
5156   if (Ty->getScalarSizeInBits() < 32)
5157     return Type::getInt32Ty(Ty->getContext());
5158 
5159   return Ty;
5160 }
5161 
5162 static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
5163   Ty0 = convertPointerToIntegerType(DL, Ty0);
5164   Ty1 = convertPointerToIntegerType(DL, Ty1);
5165   if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
5166     return Ty0;
5167   return Ty1;
5168 }
5169 
5170 /// \brief Check that the instruction has outside loop users and is not an
5171 /// identified reduction variable.
5172 static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
5173                                SmallPtrSetImpl<Value *> &AllowedExit) {
5174   // Reduction and Induction instructions are allowed to have exit users. All
5175   // other instructions must not have external users.
5176   if (!AllowedExit.count(Inst))
5177     // Check that all of the users of the instruction are inside the loop.
5178     for (User *U : Inst->users()) {
5179       Instruction *UI = cast<Instruction>(U);
5180       // This user may be a reduction exit value.
5181       if (!TheLoop->contains(UI)) {
5182         DEBUG(dbgs() << "LV: Found an outside user for: " << *UI << '\n');
5183         return true;
5184       }
5185     }
5186   return false;
5187 }
5188 
5189 void LoopVectorizationLegality::addInductionPhi(
5190     PHINode *Phi, const InductionDescriptor &ID,
5191     SmallPtrSetImpl<Value *> &AllowedExit) {
5192   Inductions[Phi] = ID;
5193   Type *PhiTy = Phi->getType();
5194   const DataLayout &DL = Phi->getModule()->getDataLayout();
5195 
5196   // Get the widest type.
5197   if (!PhiTy->isFloatingPointTy()) {
5198     if (!WidestIndTy)
5199       WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
5200     else
5201       WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
5202   }
5203 
5204   // Int inductions are special because we only allow one IV.
5205   if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
5206       ID.getConstIntStepValue() &&
5207       ID.getConstIntStepValue()->isOne() &&
5208       isa<Constant>(ID.getStartValue()) &&
5209       cast<Constant>(ID.getStartValue())->isNullValue()) {
5210 
5211     // Use the phi node with the widest type as induction. Use the last
5212     // one if there are multiple (no good reason for doing this other
5213     // than it is expedient). We've checked that it begins at zero and
5214     // steps by one, so this is a canonical induction variable.
5215     if (!Induction || PhiTy == WidestIndTy)
5216       Induction = Phi;
5217   }
5218 
5219   // Both the PHI node itself, and the "post-increment" value feeding
5220   // back into the PHI node may have external users.
5221   AllowedExit.insert(Phi);
5222   AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
5223 
5224   DEBUG(dbgs() << "LV: Found an induction variable.\n");
5225   return;
5226 }
5227 
5228 bool LoopVectorizationLegality::canVectorizeInstrs() {
5229   BasicBlock *Header = TheLoop->getHeader();
5230 
5231   // Look for the attribute signaling the absence of NaNs.
5232   Function &F = *Header->getParent();
5233   HasFunNoNaNAttr =
5234       F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
5235 
5236   // For each block in the loop.
5237   for (BasicBlock *BB : TheLoop->blocks()) {
5238     // Scan the instructions in the block and look for hazards.
5239     for (Instruction &I : *BB) {
5240       if (auto *Phi = dyn_cast<PHINode>(&I)) {
5241         Type *PhiTy = Phi->getType();
5242         // Check that this PHI type is allowed.
5243         if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
5244             !PhiTy->isPointerTy()) {
5245           ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
5246                     << "loop control flow is not understood by vectorizer");
5247           DEBUG(dbgs() << "LV: Found a non-int non-pointer PHI.\n");
5248           return false;
5249         }
5250 
5251         // If this PHINode is not in the header block, then we know that we
5252         // can convert it to a select during if-conversion. No need to check if
5253         // the PHIs in this block are induction or reduction variables.
5254         if (BB != Header) {
5255           // Check that this instruction has no outside users or is an
5256           // identified reduction value with an outside user.
5257           if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
5258             continue;
5259           ORE->emit(createMissedAnalysis("NeitherInductionNorReduction", Phi)
5260                     << "value could not be identified as "
5261                        "an induction or reduction variable");
5262           return false;
5263         }
5264 
5265         // We only allow if-converted PHIs with exactly two incoming values.
5266         if (Phi->getNumIncomingValues() != 2) {
5267           ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
5268                     << "loop control flow is not understood by vectorizer");
5269           DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
5270           return false;
5271         }
5272 
5273         RecurrenceDescriptor RedDes;
5274         if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
5275           if (RedDes.hasUnsafeAlgebra())
5276             Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
5277           AllowedExit.insert(RedDes.getLoopExitInstr());
5278           Reductions[Phi] = RedDes;
5279           continue;
5280         }
5281 
5282         InductionDescriptor ID;
5283         if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
5284           addInductionPhi(Phi, ID, AllowedExit);
5285           if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
5286             Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
5287           continue;
5288         }
5289 
5290         if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop, DT)) {
5291           FirstOrderRecurrences.insert(Phi);
5292           continue;
5293         }
5294 
5295         // As a last resort, coerce the PHI to an AddRec expression
5296         // and re-try classifying it as an induction PHI.
5297         if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
5298           addInductionPhi(Phi, ID, AllowedExit);
5299           continue;
5300         }
5301 
5302         ORE->emit(createMissedAnalysis("NonReductionValueUsedOutsideLoop", Phi)
5303                   << "value that could not be identified as "
5304                      "reduction is used outside the loop");
5305         DEBUG(dbgs() << "LV: Found an unidentified PHI: " << *Phi << "\n");
5306         return false;
5307       } // end of PHI handling
5308 
5309       // We handle calls that:
5310       //   * Are debug info intrinsics.
5311       //   * Have a mapping to an IR intrinsic.
5312       //   * Have a vector version available.
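      // (For example, purely as an illustration: a sinf call known to the
      // target library info may map to the llvm.sin intrinsic, or may have a
      // vector library variant reported by TLI->isFunctionVectorizable.)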
5313       auto *CI = dyn_cast<CallInst>(&I);
5314       if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
5315           !isa<DbgInfoIntrinsic>(CI) &&
5316           !(CI->getCalledFunction() && TLI &&
5317             TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
5318         ORE->emit(createMissedAnalysis("CantVectorizeCall", CI)
5319                   << "call instruction cannot be vectorized");
5320         DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
5321         return false;
5322       }
5323 
5324       // Intrinsics such as powi, cttz and ctlz are legal to vectorize if the
5325       // second argument is the same (i.e. loop invariant).
5326       if (CI && hasVectorInstrinsicScalarOpd(
5327                     getVectorIntrinsicIDForCall(CI, TLI), 1)) {
5328         auto *SE = PSE.getSE();
5329         if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
5330           ORE->emit(createMissedAnalysis("CantVectorizeIntrinsic", CI)
5331                     << "intrinsic instruction cannot be vectorized");
5332           DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
5333           return false;
5334         }
5335       }
5336 
5337       // Check that the instruction return type is vectorizable.
5338       // Also, we can't vectorize extractelement instructions.
5339       if ((!VectorType::isValidElementType(I.getType()) &&
5340            !I.getType()->isVoidTy()) ||
5341           isa<ExtractElementInst>(I)) {
5342         ORE->emit(createMissedAnalysis("CantVectorizeInstructionReturnType", &I)
5343                   << "instruction return type cannot be vectorized");
5344         DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
5345         return false;
5346       }
5347 
5348       // Check that the stored type is vectorizable.
5349       if (auto *ST = dyn_cast<StoreInst>(&I)) {
5350         Type *T = ST->getValueOperand()->getType();
5351         if (!VectorType::isValidElementType(T)) {
5352           ORE->emit(createMissedAnalysis("CantVectorizeStore", ST)
5353                     << "store instruction cannot be vectorized");
5354           return false;
5355         }
5356 
5357         // FP instructions can allow unsafe algebra, thus vectorizable by
5358         // non-IEEE-754 compliant SIMD units.
5359         // This applies to floating-point math operations and calls, not memory
5360         // operations, shuffles, or casts, as they don't change precision or
5361         // semantics.
5362       } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
5363                  !I.hasUnsafeAlgebra()) {
5364         DEBUG(dbgs() << "LV: Found FP op without unsafe algebra.\n");
5365         Hints->setPotentiallyUnsafe();
5366       }
5367 
5368       // Reduction instructions are allowed to have exit users.
5369       // All other instructions must not have external users.
5370       if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
5371         ORE->emit(createMissedAnalysis("ValueUsedOutsideLoop", &I)
5372                   << "value cannot be used outside the loop");
5373         return false;
5374       }
5375 
5376     } // next instr.
5377   }
5378 
5379   if (!Induction) {
5380     DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
5381     if (Inductions.empty()) {
5382       ORE->emit(createMissedAnalysis("NoInductionVariable")
5383                 << "loop induction variable could not be identified");
5384       return false;
5385     }
5386   }
5387 
5388   // Now we know the widest induction type, check if our found induction
5389   // is the same size. If it's not, unset it here and InnerLoopVectorizer
5390   // will create another.
5391   if (Induction && WidestIndTy != Induction->getType())
5392     Induction = nullptr;
5393 
5394   return true;
5395 }
5396 
5397 void LoopVectorizationLegality::collectLoopScalars() {
5398 
5399   // If an instruction is uniform after vectorization, it will remain scalar.
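  // (Uniform values are identical across all vector lanes, so a single scalar
  // instance suffices for each; Uniforms is therefore a subset of Scalars.)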
5400   Scalars.insert(Uniforms.begin(), Uniforms.end());
5401 
5402   // Collect the getelementptr instructions that will not be vectorized. A
5403   // getelementptr instruction is only vectorized if it is used for a legal
5404   // gather or scatter operation.
5405   for (auto *BB : TheLoop->blocks())
5406     for (auto &I : *BB) {
5407       if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
5408         Scalars.insert(GEP);
5409         continue;
5410       }
5411       auto *Ptr = getPointerOperand(&I);
5412       if (!Ptr)
5413         continue;
5414       auto *GEP = getGEPInstruction(Ptr);
5415       if (GEP && isLegalGatherOrScatter(&I))
5416         Scalars.erase(GEP);
5417     }
5418 
5419   // An induction variable will remain scalar if all users of the induction
5420   // variable and induction variable update remain scalar.
5421   auto *Latch = TheLoop->getLoopLatch();
5422   for (auto &Induction : *getInductionVars()) {
5423     auto *Ind = Induction.first;
5424     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5425 
5426     // Determine if all users of the induction variable are scalar after
5427     // vectorization.
5428     auto ScalarInd = all_of(Ind->users(), [&](User *U) -> bool {
5429       auto *I = cast<Instruction>(U);
5430       return I == IndUpdate || !TheLoop->contains(I) || Scalars.count(I);
5431     });
5432     if (!ScalarInd)
5433       continue;
5434 
5435     // Determine if all users of the induction variable update instruction are
5436     // scalar after vectorization.
5437     auto ScalarIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
5438       auto *I = cast<Instruction>(U);
5439       return I == Ind || !TheLoop->contains(I) || Scalars.count(I);
5440     });
5441     if (!ScalarIndUpdate)
5442       continue;
5443 
5444     // The induction variable and its update instruction will remain scalar.
5445     Scalars.insert(Ind);
5446     Scalars.insert(IndUpdate);
5447   }
5448 }
5449 
5450 bool LoopVectorizationLegality::hasConsecutiveLikePtrOperand(Instruction *I) {
5451   if (isAccessInterleaved(I))
5452     return true;
5453   if (auto *Ptr = getPointerOperand(I))
5454     return isConsecutivePtr(Ptr);
5455   return false;
5456 }
5457 
5458 bool LoopVectorizationLegality::isScalarWithPredication(Instruction *I) {
5459   if (!blockNeedsPredication(I->getParent()))
5460     return false;
5461   switch (I->getOpcode()) {
5462   default:
5463     break;
5464   case Instruction::Store:
5465     return !isMaskRequired(I);
5466   case Instruction::UDiv:
5467   case Instruction::SDiv:
5468   case Instruction::SRem:
5469   case Instruction::URem:
5470     return mayDivideByZero(*I);
5471   }
5472   return false;
5473 }
5474 
5475 bool LoopVectorizationLegality::memoryInstructionMustBeScalarized(
5476     Instruction *I, unsigned VF) {
5477 
5478   // If the memory instruction is in an interleaved group, it will be
5479   // vectorized and its pointer will remain uniform.
5480   if (isAccessInterleaved(I))
5481     return false;
5482 
5483   // Get and ensure we have a valid memory instruction.
5484   LoadInst *LI = dyn_cast<LoadInst>(I);
5485   StoreInst *SI = dyn_cast<StoreInst>(I);
5486   assert((LI || SI) && "Invalid memory instruction");
5487 
5488   // If the pointer operand is uniform (loop invariant), the memory instruction
5489   // will be scalarized.
5490   auto *Ptr = getPointerOperand(I);
5491   if (LI && isUniform(Ptr))
5492     return true;
5493 
5494   // If the pointer operand is non-consecutive and neither a gather nor a
5495   // scatter operation is legal, the memory instruction will be scalarized.
5496   if (!isConsecutivePtr(Ptr) && !isLegalGatherOrScatter(I))
5497     return true;
5498 
5499   // If the instruction is a store located in a predicated block, it will be
5500   // scalarized.
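  // (For memory instructions, isScalarWithPredication holds only for stores
  // in predicated blocks that cannot use a masked store; see the Store case
  // in its definition above.)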
5501   if (isScalarWithPredication(I))
5502     return true;
5503 
5504   // If the instruction's allocated size doesn't equal its type size, it
5505   // requires padding and will be scalarized.
5506   auto &DL = I->getModule()->getDataLayout();
5507   auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
5508   if (hasIrregularType(ScalarTy, DL, VF))
5509     return true;
5510 
5511   // Otherwise, the memory instruction should be vectorized if the rest of the
5512   // loop is.
5513   return false;
5514 }
5515 
5516 void LoopVectorizationLegality::collectLoopUniforms() {
5517   // We now know that the loop is vectorizable!
5518   // Collect instructions inside the loop that will remain uniform after
5519   // vectorization.
5520 
5521   // Global values, params and instructions outside of the current loop are out
5522   // of scope.
5523   auto isOutOfScope = [&](Value *V) -> bool {
5524     Instruction *I = dyn_cast<Instruction>(V);
5525     return (!I || !TheLoop->contains(I));
5526   };
5527 
5528   SetVector<Instruction *> Worklist;
5529   BasicBlock *Latch = TheLoop->getLoopLatch();
5530 
5531   // Start with the conditional branch. If the branch condition is an
5532   // instruction contained in the loop that is only used by the branch, it is
5533   // uniform.
5534   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5535   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) {
5536     Worklist.insert(Cmp);
5537     DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
5538   }
5539 
5540   // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
5541   // are pointers that are treated like consecutive pointers during
5542   // vectorization. The pointer operands of interleaved accesses are an
5543   // example.
5544   SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
5545 
5546   // Holds pointer operands of instructions that are possibly non-uniform.
5547   SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
5548 
5549   // Iterate over the instructions in the loop, and collect all
5550   // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
5551   // that a consecutive-like pointer operand will be scalarized, we collect it
5552   // in PossibleNonUniformPtrs instead. We use two sets here because a single
5553   // getelementptr instruction can be used by both vectorized and scalarized
5554   // memory instructions. For example, if a loop loads and stores from the same
5555   // location, but the store is conditional, the store will be scalarized, and
5556   // the getelementptr won't remain uniform.
5557   for (auto *BB : TheLoop->blocks())
5558     for (auto &I : *BB) {
5559 
5560       // If there's no pointer operand, there's nothing to do.
5561       auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I));
5562       if (!Ptr)
5563         continue;
5564 
5565       // True if all users of Ptr are memory accesses that have Ptr as their
5566       // pointer operand.
5567       auto UsersAreMemAccesses = all_of(Ptr->users(), [&](User *U) -> bool {
5568         return getPointerOperand(U) == Ptr;
5569       });
5570 
5571       // Ensure the memory instruction will not be scalarized, making its
5572       // pointer operand non-uniform. If the pointer operand is used by some
5573       // instruction other than a memory access, we're not going to check if
5574       // that other instruction may be scalarized here. Thus, conservatively
5575       // assume the pointer operand may be non-uniform.
      if (!UsersAreMemAccesses || memoryInstructionMustBeScalarized(&I))
        PossibleNonUniformPtrs.insert(Ptr);

      // If the memory instruction will be vectorized and its pointer operand
      // is consecutive-like, the pointer operand should remain uniform.
      else if (hasConsecutiveLikePtrOperand(&I))
        ConsecutiveLikePtrs.insert(Ptr);

      // Otherwise, if the memory instruction will be vectorized and its
      // pointer operand is non-consecutive-like, the memory instruction should
      // be a gather or scatter operation. Its pointer operand will be
      // non-uniform.
      else
        PossibleNonUniformPtrs.insert(Ptr);
    }

  // Add to the Worklist all consecutive and consecutive-like pointers that
  // aren't also identified as possibly non-uniform.
  for (auto *V : ConsecutiveLikePtrs)
    if (!PossibleNonUniformPtrs.count(V)) {
      DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
      Worklist.insert(V);
    }

  // Expand Worklist in topological order: whenever a new instruction is
  // added, its users should either already be inside the Worklist or be out
  // of scope. This ensures a uniform instruction will only be used by uniform
  // or out-of-scope instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      if (isOutOfScope(OV))
        continue;
      auto *OI = cast<Instruction>(OV);
      if (all_of(OI->users(), [&](User *U) -> bool {
            return isOutOfScope(U) || Worklist.count(cast<Instruction>(U));
          })) {
        Worklist.insert(OI);
        DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
      }
    }
  }

  // Returns true if Ptr is the pointer operand of a memory access instruction
  // I, and I is known to not require scalarization.
  auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
    return getPointerOperand(I) == Ptr && !memoryInstructionMustBeScalarized(I);
  };

  // For an instruction to be added into Worklist above, all its users inside
  // the loop should also be in Worklist. However, this condition cannot be
  // true for phi nodes that form a cyclic dependence. We must process phi
  // nodes separately. An induction variable will remain uniform if all users
  // of the induction variable and induction variable update remain uniform.
  // The code below handles both pointer and non-pointer induction variables.
  for (auto &Induction : Inductions) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are uniform after
    // vectorization.
    auto UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, Ind);
    });
    if (!UniformInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // uniform after vectorization.
5649 auto UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool { 5650 auto *I = cast<Instruction>(U); 5651 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5652 isVectorizedMemAccessUse(I, IndUpdate); 5653 }); 5654 if (!UniformIndUpdate) 5655 continue; 5656 5657 // The induction variable and its update instruction will remain uniform. 5658 Worklist.insert(Ind); 5659 Worklist.insert(IndUpdate); 5660 DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n"); 5661 DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n"); 5662 } 5663 5664 Uniforms.insert(Worklist.begin(), Worklist.end()); 5665 } 5666 5667 bool LoopVectorizationLegality::canVectorizeMemory() { 5668 LAI = &(*GetLAA)(*TheLoop); 5669 InterleaveInfo.setLAI(LAI); 5670 const OptimizationRemarkAnalysis *LAR = LAI->getReport(); 5671 if (LAR) { 5672 OptimizationRemarkAnalysis VR(Hints->vectorizeAnalysisPassName(), 5673 "loop not vectorized: ", *LAR); 5674 ORE->emit(VR); 5675 } 5676 if (!LAI->canVectorizeMemory()) 5677 return false; 5678 5679 if (LAI->hasStoreToLoopInvariantAddress()) { 5680 ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress") 5681 << "write to a loop invariant address could not be vectorized"); 5682 DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n"); 5683 return false; 5684 } 5685 5686 Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks()); 5687 PSE.addPredicate(LAI->getPSE().getUnionPredicate()); 5688 5689 return true; 5690 } 5691 5692 bool LoopVectorizationLegality::isInductionVariable(const Value *V) { 5693 Value *In0 = const_cast<Value *>(V); 5694 PHINode *PN = dyn_cast_or_null<PHINode>(In0); 5695 if (!PN) 5696 return false; 5697 5698 return Inductions.count(PN); 5699 } 5700 5701 bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) { 5702 return FirstOrderRecurrences.count(Phi); 5703 } 5704 5705 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) { 5706 return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT); 5707 } 5708 5709 bool LoopVectorizationLegality::blockCanBePredicated( 5710 BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) { 5711 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel(); 5712 5713 for (Instruction &I : *BB) { 5714 // Check that we don't have a constant expression that can trap as operand. 5715 for (Value *Operand : I.operands()) { 5716 if (auto *C = dyn_cast<Constant>(Operand)) 5717 if (C->canTrap()) 5718 return false; 5719 } 5720 // We might be able to hoist the load. 5721 if (I.mayReadFromMemory()) { 5722 auto *LI = dyn_cast<LoadInst>(&I); 5723 if (!LI) 5724 return false; 5725 if (!SafePtrs.count(LI->getPointerOperand())) { 5726 if (isLegalMaskedLoad(LI->getType(), LI->getPointerOperand()) || 5727 isLegalMaskedGather(LI->getType())) { 5728 MaskedOp.insert(LI); 5729 continue; 5730 } 5731 // !llvm.mem.parallel_loop_access implies if-conversion safety. 5732 if (IsAnnotatedParallel) 5733 continue; 5734 return false; 5735 } 5736 } 5737 5738 if (I.mayWriteToMemory()) { 5739 auto *SI = dyn_cast<StoreInst>(&I); 5740 // We only support predication of stores in basic blocks with one 5741 // predecessor. 5742 if (!SI) 5743 return false; 5744 5745 // Build a masked store if it is legal for the target. 
5746 if (isLegalMaskedStore(SI->getValueOperand()->getType(), 5747 SI->getPointerOperand()) || 5748 isLegalMaskedScatter(SI->getValueOperand()->getType())) { 5749 MaskedOp.insert(SI); 5750 continue; 5751 } 5752 5753 bool isSafePtr = (SafePtrs.count(SI->getPointerOperand()) != 0); 5754 bool isSinglePredecessor = SI->getParent()->getSinglePredecessor(); 5755 5756 if (++NumPredStores > NumberOfStoresToPredicate || !isSafePtr || 5757 !isSinglePredecessor) 5758 return false; 5759 } 5760 if (I.mayThrow()) 5761 return false; 5762 } 5763 5764 return true; 5765 } 5766 5767 void InterleavedAccessInfo::collectConstStrideAccesses( 5768 MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo, 5769 const ValueToValueMap &Strides) { 5770 5771 auto &DL = TheLoop->getHeader()->getModule()->getDataLayout(); 5772 5773 // Since it's desired that the load/store instructions be maintained in 5774 // "program order" for the interleaved access analysis, we have to visit the 5775 // blocks in the loop in reverse postorder (i.e., in a topological order). 5776 // Such an ordering will ensure that any load/store that may be executed 5777 // before a second load/store will precede the second load/store in 5778 // AccessStrideInfo. 5779 LoopBlocksDFS DFS(TheLoop); 5780 DFS.perform(LI); 5781 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) 5782 for (auto &I : *BB) { 5783 auto *LI = dyn_cast<LoadInst>(&I); 5784 auto *SI = dyn_cast<StoreInst>(&I); 5785 if (!LI && !SI) 5786 continue; 5787 5788 Value *Ptr = getPointerOperand(&I); 5789 // We don't check wrapping here because we don't know yet if Ptr will be 5790 // part of a full group or a group with gaps. Checking wrapping for all 5791 // pointers (even those that end up in groups with no gaps) will be overly 5792 // conservative. For full groups, wrapping should be ok since if we would 5793 // wrap around the address space we would do a memory access at nullptr 5794 // even without the transformation. The wrapping checks are therefore 5795 // deferred until after we've formed the interleaved groups. 5796 int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, 5797 /*Assume=*/true, /*ShouldCheckWrap=*/false); 5798 5799 const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 5800 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 5801 uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType()); 5802 5803 // An alignment of 0 means target ABI alignment. 5804 unsigned Align = LI ? LI->getAlignment() : SI->getAlignment(); 5805 if (!Align) 5806 Align = DL.getABITypeAlignment(PtrTy->getElementType()); 5807 5808 AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align); 5809 } 5810 } 5811 5812 // Analyze interleaved accesses and collect them into interleaved load and 5813 // store groups. 5814 // 5815 // When generating code for an interleaved load group, we effectively hoist all 5816 // loads in the group to the location of the first load in program order. When 5817 // generating code for an interleaved store group, we sink all stores to the 5818 // location of the last store. This code motion can change the order of load 5819 // and store instructions and may break dependences. 5820 // 5821 // The code generation strategy mentioned above ensures that we won't violate 5822 // any write-after-read (WAR) dependences. 
5823 // 5824 // E.g., for the WAR dependence: a = A[i]; // (1) 5825 // A[i] = b; // (2) 5826 // 5827 // The store group of (2) is always inserted at or below (2), and the load 5828 // group of (1) is always inserted at or above (1). Thus, the instructions will 5829 // never be reordered. All other dependences are checked to ensure the 5830 // correctness of the instruction reordering. 5831 // 5832 // The algorithm visits all memory accesses in the loop in bottom-up program 5833 // order. Program order is established by traversing the blocks in the loop in 5834 // reverse postorder when collecting the accesses. 5835 // 5836 // We visit the memory accesses in bottom-up order because it can simplify the 5837 // construction of store groups in the presence of write-after-write (WAW) 5838 // dependences. 5839 // 5840 // E.g., for the WAW dependence: A[i] = a; // (1) 5841 // A[i] = b; // (2) 5842 // A[i + 1] = c; // (3) 5843 // 5844 // We will first create a store group with (3) and (2). (1) can't be added to 5845 // this group because it and (2) are dependent. However, (1) can be grouped 5846 // with other accesses that may precede it in program order. Note that a 5847 // bottom-up order does not imply that WAW dependences should not be checked. 5848 void InterleavedAccessInfo::analyzeInterleaving( 5849 const ValueToValueMap &Strides) { 5850 DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n"); 5851 5852 // Holds all accesses with a constant stride. 5853 MapVector<Instruction *, StrideDescriptor> AccessStrideInfo; 5854 collectConstStrideAccesses(AccessStrideInfo, Strides); 5855 5856 if (AccessStrideInfo.empty()) 5857 return; 5858 5859 // Collect the dependences in the loop. 5860 collectDependences(); 5861 5862 // Holds all interleaved store groups temporarily. 5863 SmallSetVector<InterleaveGroup *, 4> StoreGroups; 5864 // Holds all interleaved load groups temporarily. 5865 SmallSetVector<InterleaveGroup *, 4> LoadGroups; 5866 5867 // Search in bottom-up program order for pairs of accesses (A and B) that can 5868 // form interleaved load or store groups. In the algorithm below, access A 5869 // precedes access B in program order. We initialize a group for B in the 5870 // outer loop of the algorithm, and then in the inner loop, we attempt to 5871 // insert each A into B's group if: 5872 // 5873 // 1. A and B have the same stride, 5874 // 2. A and B have the same memory object size, and 5875 // 3. A belongs in B's group according to its distance from B. 5876 // 5877 // Special care is taken to ensure group formation will not break any 5878 // dependences. 5879 for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend(); 5880 BI != E; ++BI) { 5881 Instruction *B = BI->first; 5882 StrideDescriptor DesB = BI->second; 5883 5884 // Initialize a group for B if it has an allowable stride. Even if we don't 5885 // create a group for B, we continue with the bottom-up algorithm to ensure 5886 // we don't break any of B's dependences. 
5887 InterleaveGroup *Group = nullptr; 5888 if (isStrided(DesB.Stride)) { 5889 Group = getInterleaveGroup(B); 5890 if (!Group) { 5891 DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n'); 5892 Group = createInterleaveGroup(B, DesB.Stride, DesB.Align); 5893 } 5894 if (B->mayWriteToMemory()) 5895 StoreGroups.insert(Group); 5896 else 5897 LoadGroups.insert(Group); 5898 } 5899 5900 for (auto AI = std::next(BI); AI != E; ++AI) { 5901 Instruction *A = AI->first; 5902 StrideDescriptor DesA = AI->second; 5903 5904 // Our code motion strategy implies that we can't have dependences 5905 // between accesses in an interleaved group and other accesses located 5906 // between the first and last member of the group. Note that this also 5907 // means that a group can't have more than one member at a given offset. 5908 // The accesses in a group can have dependences with other accesses, but 5909 // we must ensure we don't extend the boundaries of the group such that 5910 // we encompass those dependent accesses. 5911 // 5912 // For example, assume we have the sequence of accesses shown below in a 5913 // stride-2 loop: 5914 // 5915 // (1, 2) is a group | A[i] = a; // (1) 5916 // | A[i-1] = b; // (2) | 5917 // A[i-3] = c; // (3) 5918 // A[i] = d; // (4) | (2, 4) is not a group 5919 // 5920 // Because accesses (2) and (3) are dependent, we can group (2) with (1) 5921 // but not with (4). If we did, the dependent access (3) would be within 5922 // the boundaries of the (2, 4) group. 5923 if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) { 5924 5925 // If a dependence exists and A is already in a group, we know that A 5926 // must be a store since A precedes B and WAR dependences are allowed. 5927 // Thus, A would be sunk below B. We release A's group to prevent this 5928 // illegal code motion. A will then be free to form another group with 5929 // instructions that precede it. 5930 if (isInterleaved(A)) { 5931 InterleaveGroup *StoreGroup = getInterleaveGroup(A); 5932 StoreGroups.remove(StoreGroup); 5933 releaseGroup(StoreGroup); 5934 } 5935 5936 // If a dependence exists and A is not already in a group (or it was 5937 // and we just released it), B might be hoisted above A (if B is a 5938 // load) or another store might be sunk below A (if B is a store). In 5939 // either case, we can't add additional instructions to B's group. B 5940 // will only form a group with instructions that it precedes. 5941 break; 5942 } 5943 5944 // At this point, we've checked for illegal code motion. If either A or B 5945 // isn't strided, there's nothing left to do. 5946 if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride)) 5947 continue; 5948 5949 // Ignore A if it's already in a group or isn't the same kind of memory 5950 // operation as B. 5951 if (isInterleaved(A) || A->mayReadFromMemory() != B->mayReadFromMemory()) 5952 continue; 5953 5954 // Check rules 1 and 2. Ignore A if its stride or size is different from 5955 // that of B. 5956 if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size) 5957 continue; 5958 5959 // Calculate the distance from A to B. 5960 const SCEVConstant *DistToB = dyn_cast<SCEVConstant>( 5961 PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev)); 5962 if (!DistToB) 5963 continue; 5964 int64_t DistanceToB = DistToB->getAPInt().getSExtValue(); 5965 5966 // Check rule 3. Ignore A if its distance to B is not a multiple of the 5967 // size. 5968 if (DistanceToB % static_cast<int64_t>(DesB.Size)) 5969 continue; 5970 5971 // Ignore A if either A or B is in a predicated block. 
Although we
      // currently prevent group formation for predicated accesses, we may be
      // able to relax this limitation in the future once we handle more
      // complicated blocks.
      if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Align)) {
        DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                     << " into the interleave group with" << *B << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  } // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor())
      releaseGroup(Group);

  // Remove interleaved groups with gaps (currently only loads) whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer
  // doesn't wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. Once we change to use Assume=true
  // we'll only need at most one runtime check per interleaved group.
  //
  for (InterleaveGroup *Group : LoadGroups) {

    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap. So we check only
    // group member 0 (which is always guaranteed to exist) and group member
    // Factor - 1; if the latter doesn't exist, we rely on peeling (if it is a
    // non-reversed access -- see Case 3).
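    // For example (a hypothetical factor-4 load group with members at indices
    // 0, 1, and 3): if the pointers of members 0 and 3 are known not to wrap,
    // no member pointer between them can wrap either, so checking those two
    // members covers the whole group.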
    Value *FirstMemberPtr = getPointerOperand(Group->getMember(0));
    if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                      /*ShouldCheckWrap=*/true)) {
      DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "first group member potentially pointer-wrapping.\n");
      releaseGroup(Group);
      continue;
    }
    Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
    if (LastMember) {
      Value *LastMemberPtr = getPointerOperand(LastMember);
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true)) {
        DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                        "last group member potentially pointer-wrapping.\n");
        releaseGroup(Group);
      }
    } else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        releaseGroup(Group);
        continue;
      }
      DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }
}

LoopVectorizationCostModel::VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
  // Width 1 means no vectorization.
  VectorizationFactor Factor = {1U, 0U};
  if (OptForSize && Legal->getRuntimePointerChecking()->Need) {
    ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize")
              << "runtime pointer checks needed. Enable vectorization of this "
                 "loop with '#pragma clang loop vectorize(enable)' when "
                 "compiling with -Os/-Oz");
    DEBUG(dbgs()
          << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
    return Factor;
  }

  if (!EnableCondStoresVectorization && Legal->getNumPredStores()) {
    ORE->emit(createMissedAnalysis("ConditionalStore")
              << "store that is conditionally executed prevents vectorization");
    DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
    return Factor;
  }

  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);
  unsigned MaxSafeDepDist = -1U;

  // Get the maximum safe dependence distance in bits computed by LAA. If the
  // loop contains any interleaved accesses, we divide the dependence distance
  // by the maximum interleave factor of all interleaved groups. Note that
  // although the division ensures correctness, this is a fairly conservative
  // computation because the maximum distance computed by LAA may not involve
  // any of the interleaved accesses.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist =
        Legal->getMaxSafeDepDistBytes() * 8 / Legal->getMaxInterleaveFactor();

  WidestRegister =
      ((WidestRegister < MaxSafeDepDist) ?
WidestRegister : MaxSafeDepDist); 6102 unsigned MaxVectorSize = WidestRegister / WidestType; 6103 6104 DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / " 6105 << WidestType << " bits.\n"); 6106 DEBUG(dbgs() << "LV: The Widest register is: " << WidestRegister 6107 << " bits.\n"); 6108 6109 if (MaxVectorSize == 0) { 6110 DEBUG(dbgs() << "LV: The target has no vector registers.\n"); 6111 MaxVectorSize = 1; 6112 } 6113 6114 assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements" 6115 " into one vector!"); 6116 6117 unsigned VF = MaxVectorSize; 6118 if (MaximizeBandwidth && !OptForSize) { 6119 // Collect all viable vectorization factors. 6120 SmallVector<unsigned, 8> VFs; 6121 unsigned NewMaxVectorSize = WidestRegister / SmallestType; 6122 for (unsigned VS = MaxVectorSize; VS <= NewMaxVectorSize; VS *= 2) 6123 VFs.push_back(VS); 6124 6125 // For each VF calculate its register usage. 6126 auto RUs = calculateRegisterUsage(VFs); 6127 6128 // Select the largest VF which doesn't require more registers than existing 6129 // ones. 6130 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true); 6131 for (int i = RUs.size() - 1; i >= 0; --i) { 6132 if (RUs[i].MaxLocalUsers <= TargetNumRegisters) { 6133 VF = VFs[i]; 6134 break; 6135 } 6136 } 6137 } 6138 6139 // If we optimize the program for size, avoid creating the tail loop. 6140 if (OptForSize) { 6141 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop); 6142 DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n'); 6143 6144 // If we don't know the precise trip count, don't try to vectorize. 6145 if (TC < 2) { 6146 ORE->emit( 6147 createMissedAnalysis("UnknownLoopCountComplexCFG") 6148 << "unable to calculate the loop count due to complex control flow"); 6149 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 6150 return Factor; 6151 } 6152 6153 // Find the maximum SIMD width that can fit within the trip count. 6154 VF = TC % MaxVectorSize; 6155 6156 if (VF == 0) 6157 VF = MaxVectorSize; 6158 else { 6159 // If the trip count that we found modulo the vectorization factor is not 6160 // zero then we require a tail. 6161 ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize") 6162 << "cannot optimize for size and vectorize at the " 6163 "same time. Enable vectorization of this loop " 6164 "with '#pragma clang loop vectorize(enable)' " 6165 "when compiling with -Os/-Oz"); 6166 DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n"); 6167 return Factor; 6168 } 6169 } 6170 6171 int UserVF = Hints->getWidth(); 6172 if (UserVF != 0) { 6173 assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two"); 6174 DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 6175 6176 Factor.Width = UserVF; 6177 collectInstsToScalarize(UserVF); 6178 return Factor; 6179 } 6180 6181 float Cost = expectedCost(1).first; 6182 #ifndef NDEBUG 6183 const float ScalarCost = Cost; 6184 #endif /* NDEBUG */ 6185 unsigned Width = 1; 6186 DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n"); 6187 6188 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 6189 // Ignore scalar width, because the user explicitly wants vectorization. 
  if (ForceVectorization && VF > 1) {
    Width = 2;
    Cost = expectedCost(Width).first / (float)Width;
  }

  for (unsigned i = 2; i <= VF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
    VectorizationCostTy C = expectedCost(i);
    float VectorCost = C.first / (float)i;
    DEBUG(dbgs() << "LV: Vector loop of width " << i
                 << " costs: " << (int)VectorCost << ".\n");
    if (!C.second && !ForceVectorization) {
      DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }
    if (VectorCost < Cost) {
      Cost = VectorCost;
      Width = i;
    }
  }

  DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
        << "LV: Vectorization seems to be not beneficial, "
        << "but was forced by a user.\n");
  DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
  Factor.Width = Width;
  Factor.Cost = Width * Cost;
  return Factor;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : *BB) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // consecutive. However, we do want to take consecutive stores/loads of
      // pointer vectors into account.
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I))
        continue;

      MinWidth = std::min(MinWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
      MaxWidth = std::max(MaxWidth,
                          (unsigned)DL.getTypeSizeInBits(T->getScalarType()));
    }
  }

  return {MinWidth, MaxWidth};
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
                                                           unsigned VF,
                                                           unsigned LoopCost) {

  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop overhead.
  // There are many micro-architectural considerations that we can't predict
  // at this level. For example, frontend pressure (on decode or fetch) due to
  // code size, or the number and capabilities of the execution ports.
  //
  // We use the following heuristics to select the interleave count:
  // 1. If the code has reductions, then we interleave to break the
  //    cross-iteration dependency.
  // 2. If the loop is really small, then we interleave to reduce the loop
  //    overhead.
  // 3. We don't interleave if we think that we will spill registers to memory
  //    due to the increased register pressure.

  // When we optimize for size, we don't interleave.
  if (OptForSize)
    return 1;

  // The maximum safe dependence distance was already used to bound the
  // vectorization factor, so we don't interleave in that case.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  // Do not interleave loops with a relatively small trip count.
  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
    return 1;

  unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
  DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
               << " registers\n");

  if (VF == 1) {
    if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumScalarRegs;
  } else {
    if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
      TargetNumRegisters = ForceTargetNumVectorRegs;
  }

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
  R.NumInstructions = std::max(R.NumInstructions, 1U);

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to be
  // a power of two. We want power-of-two interleave counts to simplify any
  // addressing operations or alignment considerations.
  unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
                              R.MaxLocalUsers);

  // Don't count the induction variable as interleaved.
  if (EnableIndVarRegisterHeur)
    IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
                       std::max(1U, (R.MaxLocalUsers - 1)));

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);

  // Check if the user has overridden the max.
  if (VF == 1) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If we did not calculate the cost for VF (because the user selected the VF)
  // then we calculate the cost of VF here.
  if (LoopCost == 0)
    LoopCost = expectedCost(VF).first;

  // Clamp the calculated IC to be between 1 and the max interleave count that
  // the target allows.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else if (IC < 1)
    IC = 1;

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
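  //
  // For example, in a sum reduction such as the hypothetical loop below,
  // interleaving by two keeps two independent partial sums in flight and
  // halves the length of the cross-iteration dependence chain:
  //
  //   for (i = 0; i < n; ++i)
  //     sum += a[i];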
  if (VF > 1 && Legal->getReductionVars()->size()) {
    DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF == 1 && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead and
  // potentially expose ILP opportunities.
  DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1 and we use the cost model
    // to estimate the cost of the loop and interleave until the cost of the
    // loop overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. Limit this, by default, to 2,
    // so the critical path only gets increased by one reduction operation.
    if (Legal->getReductionVars()->size() && TheLoop->getLoopDepth() > 1) {
      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
    return SmallIC;
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  bool HasReductions = (Legal->getReductionVars()->size() > 0);
  if (TTI.enableAggressiveInterleaving(HasReductions)) {
    DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi map that
  // holds the list of intervals that *end* at a specific location. This multi
  // map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but are
  // used inside the loop. We need this number separately from the max-interval
  // usage number because when we unroll, loop-invariant values do not take
  // more registers.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;
  RU.NumInstructions = 0;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  typedef DenseMap<Instruction *, unsigned> IntervalMap;
  // Maps an index to the instruction at that index.
  DenseMap<unsigned, Instruction *> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the set of instructions that are used in the loop.
  SmallSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  unsigned Index = 0;
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    RU.NumInstructions += BB->size();
    for (Instruction &I : *BB) {
      IdxToInstr[Index++] = &I;

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = Index;
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
  typedef SmallVector<Instruction *, 2> InstrList;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallSet<Instruction *, 8> OpenIntervals;

  // Get the size of the widest register.
  unsigned MaxSafeDepDist = -1U;
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
  unsigned WidestRegister =
      std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();

  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);

  DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
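  // For example, assuming 128-bit-wide registers, a value of type i32 at
  // VF = 8 is estimated at (8 * 32) / 128 = 2 registers; any type narrow
  // enough for the integer division to reach zero is still counted as one
  // register.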
  auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
    if (Ty->isTokenTy())
      return 0U;
    unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
    return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
  };

  for (unsigned i = 0; i < Index; ++i) {
    Instruction *I = IdxToInstr[i];

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // For each VF find the maximum usage of registers.
    for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
      if (VFs[j] == 1) {
        MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
        continue;
      }

      // Count the number of live intervals.
      unsigned RegUsage = 0;
      for (auto Inst : OpenIntervals) {
        // Skip ignored values for VF > 1.
        if (VecValuesToIgnore.count(Inst))
          continue;
        RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
      }
      MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
    }

    DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
                 << OpenIntervals.size() << '\n');

    // Add the current instruction to the list of open intervals.
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    unsigned Invariant = 0;
    if (VFs[i] == 1)
      Invariant = LoopInvariants.size();
    else {
      for (auto Inst : LoopInvariants)
        Invariant += GetRegUsage(Inst->getType(), VFs[i]);
    }

    DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
    DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
    DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
    DEBUG(dbgs() << "LV(REG): LoopSize: " << RU.NumInstructions << '\n');

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}

void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {

  // If we aren't vectorizing the loop, or if we've already collected the
  // instructions to scalarize, there's nothing to do. Collection may already
  // have occurred if we have a user-selected VF and are now computing the
  // expected cost for interleaving.
  if (VF < 2 || InstsToScalarize.count(VF))
    return;

  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  // not profitable to scalarize any instructions, the presence of VF in the
  // map will indicate that we've analyzed it already.
  ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];

  // Find all the instructions that are scalar with predication in the loop
  // and determine if it would be better not to if-convert the blocks they are
  // in. If so, we also record the instructions to scalarize.
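  //
  // For example, given a hypothetical loop body "if (b[i] != 0) a[i] /= b[i];"
  // the division is scalar with predication, and emitting VF predicated
  // scalar divisions may be cheaper than if-converting the block and dividing
  // a full vector, since the scalar divisions only execute when the predicate
  // holds.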
6601 for (BasicBlock *BB : TheLoop->blocks()) { 6602 if (!Legal->blockNeedsPredication(BB)) 6603 continue; 6604 for (Instruction &I : *BB) 6605 if (Legal->isScalarWithPredication(&I)) { 6606 ScalarCostsTy ScalarCosts; 6607 if (computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6608 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6609 } 6610 } 6611 } 6612 6613 int LoopVectorizationCostModel::computePredInstDiscount( 6614 Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts, 6615 unsigned VF) { 6616 6617 assert(!Legal->isUniformAfterVectorization(PredInst) && 6618 "Instruction marked uniform-after-vectorization will be predicated"); 6619 6620 // Initialize the discount to zero, meaning that the scalar version and the 6621 // vector version cost the same. 6622 int Discount = 0; 6623 6624 // Holds instructions to analyze. The instructions we visit are mapped in 6625 // ScalarCosts. Those instructions are the ones that would be scalarized if 6626 // we find that the scalar version costs less. 6627 SmallVector<Instruction *, 8> Worklist; 6628 6629 // Returns true if the given instruction can be scalarized. 6630 auto canBeScalarized = [&](Instruction *I) -> bool { 6631 6632 // We only attempt to scalarize instructions forming a single-use chain 6633 // from the original predicated block that would otherwise be vectorized. 6634 // Although not strictly necessary, we give up on instructions we know will 6635 // already be scalar to avoid traversing chains that are unlikely to be 6636 // beneficial. 6637 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6638 Legal->isScalarAfterVectorization(I)) 6639 return false; 6640 6641 // If the instruction is scalar with predication, it will be analyzed 6642 // separately. We ignore it within the context of PredInst. 6643 if (Legal->isScalarWithPredication(I)) 6644 return false; 6645 6646 // If any of the instruction's operands are uniform after vectorization, 6647 // the instruction cannot be scalarized. This prevents, for example, a 6648 // masked load from being scalarized. 6649 // 6650 // We assume we will only emit a value for lane zero of an instruction 6651 // marked uniform after vectorization, rather than VF identical values. 6652 // Thus, if we scalarize an instruction that uses a uniform, we would 6653 // create uses of values corresponding to the lanes we aren't emitting code 6654 // for. This behavior can be changed by allowing getScalarValue to clone 6655 // the lane zero values for uniforms rather than asserting. 6656 for (Use &U : I->operands()) 6657 if (auto *J = dyn_cast<Instruction>(U.get())) 6658 if (Legal->isUniformAfterVectorization(J)) 6659 return false; 6660 6661 // Otherwise, we can scalarize the instruction. 6662 return true; 6663 }; 6664 6665 // Returns true if an operand that cannot be scalarized must be extracted 6666 // from a vector. We will account for this scalarization overhead below. Note 6667 // that the non-void predicated instructions are placed in their own blocks, 6668 // and their return values are inserted into vectors. Thus, an extract would 6669 // still be required. 6670 auto needsExtract = [&](Instruction *I) -> bool { 6671 return TheLoop->contains(I) && !Legal->isScalarAfterVectorization(I); 6672 }; 6673 6674 // Compute the expected cost discount from scalarizing the entire expression 6675 // feeding the predicated instruction. We currently only consider expressions 6676 // that are single-use instruction chains. 
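  //
  // For example, if the predicated instruction is a store of the hypothetical
  // expression (x + y) * z, and the add and mul are used only along that
  // chain, both feed the worklist below, and their scalar-vs-vector cost
  // difference contributes to the discount.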
6677 Worklist.push_back(PredInst); 6678 while (!Worklist.empty()) { 6679 Instruction *I = Worklist.pop_back_val(); 6680 6681 // If we've already analyzed the instruction, there's nothing to do. 6682 if (ScalarCosts.count(I)) 6683 continue; 6684 6685 // Compute the cost of the vector instruction. Note that this cost already 6686 // includes the scalarization overhead of the predicated instruction. 6687 unsigned VectorCost = getInstructionCost(I, VF).first; 6688 6689 // Compute the cost of the scalarized instruction. This cost is the cost of 6690 // the instruction as if it wasn't if-converted and instead remained in the 6691 // predicated block. We will scale this cost by block probability after 6692 // computing the scalarization overhead. 6693 unsigned ScalarCost = VF * getInstructionCost(I, 1).first; 6694 6695 // Compute the scalarization overhead of needed insertelement instructions 6696 // and phi nodes. 6697 if (Legal->isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 6698 ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF), 6699 true, false); 6700 ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI); 6701 } 6702 6703 // Compute the scalarization overhead of needed extractelement 6704 // instructions. For each of the instruction's operands, if the operand can 6705 // be scalarized, add it to the worklist; otherwise, account for the 6706 // overhead. 6707 for (Use &U : I->operands()) 6708 if (auto *J = dyn_cast<Instruction>(U.get())) { 6709 assert(VectorType::isValidElementType(J->getType()) && 6710 "Instruction has non-scalar type"); 6711 if (canBeScalarized(J)) 6712 Worklist.push_back(J); 6713 else if (needsExtract(J)) 6714 ScalarCost += TTI.getScalarizationOverhead( 6715 ToVectorTy(J->getType(),VF), false, true); 6716 } 6717 6718 // Scale the total scalar cost by block probability. 6719 ScalarCost /= getReciprocalPredBlockProb(); 6720 6721 // Compute the discount. A non-negative discount means the vector version 6722 // of the instruction costs more, and scalarizing would be beneficial. 6723 Discount += VectorCost - ScalarCost; 6724 ScalarCosts[I] = ScalarCost; 6725 } 6726 6727 return Discount; 6728 } 6729 6730 LoopVectorizationCostModel::VectorizationCostTy 6731 LoopVectorizationCostModel::expectedCost(unsigned VF) { 6732 VectorizationCostTy Cost; 6733 6734 // Collect the instructions (and their associated costs) that will be more 6735 // profitable to scalarize. 6736 collectInstsToScalarize(VF); 6737 6738 // For each block. 6739 for (BasicBlock *BB : TheLoop->blocks()) { 6740 VectorizationCostTy BlockCost; 6741 6742 // For each instruction in the old loop. 6743 for (Instruction &I : *BB) { 6744 // Skip dbg intrinsics. 6745 if (isa<DbgInfoIntrinsic>(I)) 6746 continue; 6747 6748 // Skip ignored values. 6749 if (ValuesToIgnore.count(&I)) 6750 continue; 6751 6752 VectorizationCostTy C = getInstructionCost(&I, VF); 6753 6754 // Check if we should override the cost. 6755 if (ForceTargetInstructionCost.getNumOccurrences() > 0) 6756 C.first = ForceTargetInstructionCost; 6757 6758 BlockCost.first += C.first; 6759 BlockCost.second |= C.second; 6760 DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF " 6761 << VF << " For instruction: " << I << '\n'); 6762 } 6763 6764 // If we are vectorizing a predicated block, it will have been 6765 // if-converted. This means that the block's instructions (aside from 6766 // stores and instructions that may divide by zero) will now be 6767 // unconditionally executed. 
For the scalar case, we may not always execute 6768 // the predicated block. Thus, scale the block's cost by the probability of 6769 // executing it. 6770 if (VF == 1 && Legal->blockNeedsPredication(BB)) 6771 BlockCost.first /= getReciprocalPredBlockProb(); 6772 6773 Cost.first += BlockCost.first; 6774 Cost.second |= BlockCost.second; 6775 } 6776 6777 return Cost; 6778 } 6779 6780 /// \brief Gets Address Access SCEV after verifying that the access pattern 6781 /// is loop invariant except the induction variable dependence. 6782 /// 6783 /// This SCEV can be sent to the Target in order to estimate the address 6784 /// calculation cost. 6785 static const SCEV *getAddressAccessSCEV( 6786 Value *Ptr, 6787 LoopVectorizationLegality *Legal, 6788 ScalarEvolution *SE, 6789 const Loop *TheLoop) { 6790 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6791 if (!Gep) 6792 return nullptr; 6793 6794 // We are looking for a gep with all loop invariant indices except for one 6795 // which should be an induction variable. 6796 unsigned NumOperands = Gep->getNumOperands(); 6797 for (unsigned i = 1; i < NumOperands; ++i) { 6798 Value *Opd = Gep->getOperand(i); 6799 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6800 !Legal->isInductionVariable(Opd)) 6801 return nullptr; 6802 } 6803 6804 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6805 return SE->getSCEV(Ptr); 6806 } 6807 6808 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6809 return Legal->hasStride(I->getOperand(0)) || 6810 Legal->hasStride(I->getOperand(1)); 6811 } 6812 6813 LoopVectorizationCostModel::VectorizationCostTy 6814 LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { 6815 // If we know that this instruction will remain uniform, check the cost of 6816 // the scalar version. 6817 if (Legal->isUniformAfterVectorization(I)) 6818 VF = 1; 6819 6820 if (VF > 1 && isProfitableToScalarize(I, VF)) 6821 return VectorizationCostTy(InstsToScalarize[VF][I], false); 6822 6823 Type *VectorTy; 6824 unsigned C = getInstructionCost(I, VF, VectorTy); 6825 6826 bool TypeNotScalarized = 6827 VF > 1 && !VectorTy->isVoidTy() && TTI.getNumberOfParts(VectorTy) < VF; 6828 return VectorizationCostTy(C, TypeNotScalarized); 6829 } 6830 6831 unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6832 unsigned VF, 6833 Type *&VectorTy) { 6834 Type *RetTy = I->getType(); 6835 if (canTruncateToMinimalBitwidth(I, VF)) 6836 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 6837 VectorTy = ToVectorTy(RetTy, VF); 6838 auto SE = PSE.getSE(); 6839 6840 // TODO: We need to estimate the cost of intrinsic calls. 6841 switch (I->getOpcode()) { 6842 case Instruction::GetElementPtr: 6843 // We mark this instruction as zero-cost because the cost of GEPs in 6844 // vectorized code depends on whether the corresponding memory instruction 6845 // is scalarized or not. Therefore, we handle GEPs with the memory 6846 // instruction cost. 6847 return 0; 6848 case Instruction::Br: { 6849 return TTI.getCFInstrCost(I->getOpcode()); 6850 } 6851 case Instruction::PHI: { 6852 auto *Phi = cast<PHINode>(I); 6853 6854 // First-order recurrences are replaced by vector shuffles inside the loop. 6855 if (VF > 1 && Legal->isFirstOrderRecurrence(Phi)) 6856 return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 6857 VectorTy, VF - 1, VectorTy); 6858 6859 // TODO: IF-converted IFs become selects. 
    return 0;
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    // If we have a predicated instruction, it may not be executed for each
    // vector lane. Get the scalarization cost and scale this amount by the
    // probability of executing the predicated block. If the instruction is not
    // predicated, we fall through to the next case.
    if (VF > 1 && Legal->isScalarWithPredication(I)) {
      unsigned Cost = 0;

      // These instructions have a non-void type, so account for the phi nodes
      // that we will create. This cost is likely to be zero. The phi node
      // cost, if any, should be scaled by the block probability because it
      // models a copy at the end of each predicated block.
      Cost += VF * TTI.getCFInstrCost(Instruction::PHI);

      // The cost of the non-predicated instruction.
      Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);

      // The cost of insertelement and extractelement instructions needed for
      // scalarization.
      Cost += getScalarizationOverhead(I, VF, TTI);

      // Scale the cost by the probability of executing the predicated blocks.
      // This assumes the predicated block for each vector lane is equally
      // likely.
      return Cost / getReciprocalPredBlockProb();
    }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Since we will replace the stride with 1, the multiplication should go
    // away.
    if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
      return 0;
    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueProperties Op1VP =
        TargetTransformInfo::OP_None;
    TargetTransformInfo::OperandValueProperties Op2VP =
        TargetTransformInfo::OP_None;
    Value *Op2 = I->getOperand(1);

    // Check for a splat or for a non-uniform vector of constants.
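    // For example, a shift by the constant splat <8, 8, 8, 8> is classified
    // below as OK_UniformConstantValue with the OP_PowerOf2 property, which
    // lets the target report a cheaper cost for the vector shift.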
6921 if (isa<ConstantInt>(Op2)) { 6922 ConstantInt *CInt = cast<ConstantInt>(Op2); 6923 if (CInt && CInt->getValue().isPowerOf2()) 6924 Op2VP = TargetTransformInfo::OP_PowerOf2; 6925 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 6926 } else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) { 6927 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 6928 Constant *SplatValue = cast<Constant>(Op2)->getSplatValue(); 6929 if (SplatValue) { 6930 ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue); 6931 if (CInt && CInt->getValue().isPowerOf2()) 6932 Op2VP = TargetTransformInfo::OP_PowerOf2; 6933 Op2VK = TargetTransformInfo::OK_UniformConstantValue; 6934 } 6935 } else if (Legal->isUniform(Op2)) { 6936 Op2VK = TargetTransformInfo::OK_UniformValue; 6937 } 6938 SmallVector<const Value *, 4> Operands(I->operand_values()); 6939 return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK, 6940 Op2VK, Op1VP, Op2VP, Operands); 6941 } 6942 case Instruction::Select: { 6943 SelectInst *SI = cast<SelectInst>(I); 6944 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 6945 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 6946 Type *CondTy = SI->getCondition()->getType(); 6947 if (!ScalarCond) 6948 CondTy = VectorType::get(CondTy, VF); 6949 6950 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy); 6951 } 6952 case Instruction::ICmp: 6953 case Instruction::FCmp: { 6954 Type *ValTy = I->getOperand(0)->getType(); 6955 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 6956 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 6957 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 6958 VectorTy = ToVectorTy(ValTy, VF); 6959 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy); 6960 } 6961 case Instruction::Store: 6962 case Instruction::Load: { 6963 StoreInst *SI = dyn_cast<StoreInst>(I); 6964 LoadInst *LI = dyn_cast<LoadInst>(I); 6965 Type *ValTy = (SI ? SI->getValueOperand()->getType() : LI->getType()); 6966 VectorTy = ToVectorTy(ValTy, VF); 6967 6968 unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment(); 6969 unsigned AS = 6970 SI ? SI->getPointerAddressSpace() : LI->getPointerAddressSpace(); 6971 Value *Ptr = getPointerOperand(I); 6972 // We add the cost of address computation here instead of with the gep 6973 // instruction because only here we know whether the operation is 6974 // scalarized. 6975 if (VF == 1) 6976 return TTI.getAddressComputationCost(VectorTy) + 6977 TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS); 6978 6979 if (LI && Legal->isUniform(Ptr)) { 6980 // Scalar load + broadcast 6981 unsigned Cost = TTI.getAddressComputationCost(ValTy->getScalarType()); 6982 Cost += TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), 6983 Alignment, AS); 6984 return Cost + 6985 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, ValTy); 6986 } 6987 6988 // For an interleaved access, calculate the total cost of the whole 6989 // interleave group. 6990 if (Legal->isAccessInterleaved(I)) { 6991 auto Group = Legal->getInterleavedAccessGroup(I); 6992 assert(Group && "Fail to get an interleaved access group."); 6993 6994 // Only calculate the cost once at the insert position. 
      // Only calculate the cost once at the insert position.
      if (Group->getInsertPos() != I)
        return 0;

      unsigned InterleaveFactor = Group->getFactor();
      Type *WideVecTy =
          VectorType::get(VectorTy->getVectorElementType(),
                          VectorTy->getVectorNumElements() * InterleaveFactor);

      // Holds the indices of existing members in an interleaved load group.
      // An interleaved store group doesn't need this as it doesn't allow
      // gaps.
      SmallVector<unsigned, 4> Indices;
      if (LI) {
        for (unsigned i = 0; i < InterleaveFactor; i++)
          if (Group->getMember(i))
            Indices.push_back(i);
      }

      // Calculate the cost of the whole interleaved group.
      unsigned Cost = TTI.getInterleavedMemoryOpCost(
          I->getOpcode(), WideVecTy, Group->getFactor(), Indices,
          Group->getAlignment(), AS);

      if (Group->isReverse())
        Cost +=
            Group->getNumMembers() *
            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);

      // FIXME: An interleaved load group with a huge gap could be even more
      // expensive than scalar operations. We could then ignore such a group
      // and use scalar operations instead.
      return Cost;
    }

    // Check if the memory instruction will be scalarized.
    if (Legal->memoryInstructionMustBeScalarized(I, VF)) {
      unsigned Cost = 0;
      Type *PtrTy = ToVectorTy(Ptr->getType(), VF);

      // Figure out whether the access is strided and get the stride value
      // if it's known at compile time.
      const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, SE, TheLoop);

      // Get the cost of the scalar memory instruction and address
      // computation.
      Cost += VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
      Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                                       Alignment, AS);

      // Get the overhead of the extractelement and insertelement
      // instructions we might create due to scalarization.
      Cost += getScalarizationOverhead(I, VF, TTI);

      // If we have a predicated store, it may not be executed for each
      // vector lane. Scale the cost by the probability of executing the
      // predicated block.
      if (Legal->isScalarWithPredication(I))
        Cost /= getReciprocalPredBlockProb();

      return Cost;
    }

    // Determine if the pointer operand of the access is either consecutive
    // or reverse consecutive.
    int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
    bool Reverse = ConsecutiveStride < 0;

    // Determine if either a gather or scatter operation is legal.
    bool UseGatherOrScatter =
        !ConsecutiveStride && Legal->isLegalGatherOrScatter(I);

    unsigned Cost = TTI.getAddressComputationCost(VectorTy);
    if (UseGatherOrScatter) {
      assert(ConsecutiveStride == 0 &&
             "Gather/Scatter are not used for consecutive stride");
      return Cost +
             TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
                                        Legal->isMaskRequired(I), Alignment);
    }
    // Wide load/stores.
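    // A consecutive access that requires predication becomes a masked vector
    // load/store (e.g. @llvm.masked.load); otherwise a plain wide load/store
    // suffices, with an extra reverse shuffle when the stride is negative.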
    if (Legal->isMaskRequired(I))
      Cost +=
          TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
    else
      Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);

    if (Reverse)
      Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
    return Cost;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    // We optimize truncations of induction variables; their cost is the same
    // as the scalar operation.
    if (I->getOpcode() == Instruction::Trunc &&
        Legal->isInductionVariable(I->getOperand(0)))
      return TTI.getCastInstrCost(I->getOpcode(), I->getType(),
                                  I->getOperand(0)->getType());

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF);
    if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or turn it
      // into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (I->getOpcode() == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      } else if (I->getOpcode() == Instruction::ZExt ||
                 I->getOpcode() == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      }
    }

    return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy);
  }
  case Instruction::Call: {
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
    if (getVectorIntrinsicIDForCall(CI, TLI))
      return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
    return CallCost;
  }
  default:
    // This opcode is unknown. Assume that it costs the same as 'mul' and
    // account for the cost of executing VF scalar copies plus the
    // scalarization overhead.
    return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
           getScalarizationOverhead(I, VF, TTI);
  } // end of switch.
}

char LoopVectorize::ID = 0;
static const char lv_name[] = "Loop Vectorization";
INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)

namespace llvm {
Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
  return new LoopVectorize(NoUnrolling, AlwaysVectorize);
}
}

bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
  // Check if the pointer operand of a load or store instruction is
  // consecutive.
  if (auto *Ptr = getPointerOperand(Inst))
    return Legal->isConsecutivePtr(Ptr);
  return false;
}

void LoopVectorizationCostModel::collectValuesToIgnore() {
  // Ignore ephemeral values.
  CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);

  // Ignore type-promoting instructions we identified during reduction
  // detection.
  for (auto &Reduction : *Legal->getReductionVars()) {
    RecurrenceDescriptor &RedDes = Reduction.second;
    SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }

  // Insert values known to be scalar into VecValuesToIgnore. This is a
  // conservative estimation of the values that will later be scalarized.
  //
  // FIXME: Even though an instruction is not scalar-after-vectorization, it
  // may still be scalarized. For example, we may find an instruction to be
  // more profitable for a given vectorization factor if it were to be
  // scalarized. But at this point, we haven't yet computed the vectorization
  // factor.
  for (auto *BB : TheLoop->getBlocks())
    for (auto &I : *BB)
      if (Legal->isScalarAfterVectorization(&I))
        VecValuesToIgnore.insert(&I);
}

void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
                                             bool IfPredicateInstr) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  // Holds vector parameters or scalars, in case of uniform values.
  SmallVector<VectorParts, 4> Params;

  setDebugLocFromInst(Builder, Instr);

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  // Initialize a new scalar map entry.
  ScalarParts Entry(UF);

  VectorParts Cond;
  if (IfPredicateInstr)
    Cond = createBlockInMask(Instr->getParent());

  // For each vector unroll 'part':
  for (unsigned Part = 0; Part < UF; ++Part) {
    Entry[Part].resize(1);

    // Start an "if (pred) a[i] = ..." block for the single scalar we create.
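    // Since the unroller runs with VF == 1, the block mask is a scalar per
    // part; if it happens to be a vector, lane 0 is extracted and compared
    // against 1 to form the branch condition.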
    Value *Cmp = nullptr;
    if (IfPredicateInstr) {
      if (Cond[Part]->getType()->isVectorTy())
        Cond[Part] =
            Builder.CreateExtractElement(Cond[Part], Builder.getInt32(0));
      Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cond[Part],
                               ConstantInt::get(Cond[Part]->getType(), 1));
    }

    Instruction *Cloned = Instr->clone();
    if (!IsVoidRetTy)
      Cloned->setName(Instr->getName() + ".cloned");

    // Replace the operands of the cloned instruction with their scalar
    // equivalents in the new loop.
    for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
      auto *NewOp = getScalarValue(Instr->getOperand(op), Part, 0);
      Cloned->setOperand(op, NewOp);
    }

    // Place the cloned scalar in the new loop.
    Builder.Insert(Cloned);

    // Add the cloned scalar to the scalar map entry.
    Entry[Part][0] = Cloned;

    // If we just cloned a new assumption, add it to the assumption cache.
    if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
      if (II->getIntrinsicID() == Intrinsic::assume)
        AC->registerAssumption(II);

    // End if-block.
    if (IfPredicateInstr)
      PredicatedInstructions.push_back(std::make_pair(Cloned, Cmp));
  }
  VectorLoopValueMap.initScalar(Instr, Entry);
}

void InnerLoopUnroller::vectorizeMemoryInstruction(Instruction *Instr) {
  auto *SI = dyn_cast<StoreInst>(Instr);
  bool IfPredicateInstr = (SI && Legal->blockNeedsPredication(SI->getParent()));

  return scalarizeInstruction(Instr, IfPredicateInstr);
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
                                        Instruction::BinaryOps BinOp) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
  Type *Ty = Val->getType();
  assert(!Ty->isVectorTy() && "Val must be a scalar");

  if (Ty->isFloatingPointTy()) {
    Constant *C = ConstantFP::get(Ty, (double)StartIdx);

    // Floating-point operations had to be 'fast' to enable the unrolling.
    Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
    return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
  }
  Constant *C = ConstantInt::get(Ty, StartIdx);
  return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
}

static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  SmallVector<Metadata *, 4> MDs;
  // Reserve the first location for the self-reference to the LoopID metadata
  // node.
  MDs.push_back(nullptr);
  bool IsUnrollMetadata = false;
  MDNode *LoopID = L->getLoopID();
  if (LoopID) {
    // First find any existing loop unrolling disable metadata. Use |= so
    // that finding a match is not undone by a later, non-matching operand.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
      if (MD) {
        const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata |=
            S && S->getString().startswith("llvm.loop.unroll.disable");
      }
      MDs.push_back(LoopID->getOperand(i));
    }
  }

  if (!IsUnrollMetadata) {
    // Add runtime unroll disable metadata.
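    // The new loop ID will look roughly like:
    //   !0 = distinct !{!0, ..., !1}
    //   !1 = !{!"llvm.loop.unroll.runtime.disable"}
    // where operand 0 is the required self-reference.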
    LLVMContext &Context = L->getHeader()->getContext();
    SmallVector<Metadata *, 1> DisableOperands;
    DisableOperands.push_back(
        MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
    MDNode *DisableNode = MDNode::get(Context, DisableOperands);
    MDs.push_back(DisableNode);
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop ID itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);
    L->setLoopID(NewLoopID);
  }
}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert(L->empty() && "Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  DEBUG(dbgs() << "\nLV: Checking a loop in \""
               << L->getHeader()->getParent()->getName() << "\" from "
               << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, DisableUnrolling, *ORE);

  DEBUG(dbgs() << "LV: Loop hints:"
               << " force="
               << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                       ? "disabled"
                       : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                              ? "enabled"
                              : "?"))
               << " width=" << Hints.getWidth()
               << " unroll=" << Hints.getInterleave() << "\n");

  // Function containing the loop.
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are
  // less verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.

  if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
    DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  // Check the loop for a trip count threshold:
  // do not vectorize loops with a tiny trip count.
  const unsigned MaxTC = SE->getSmallConstantMaxTripCount(L);
  if (MaxTC > 0u && MaxTC < TinyTripCountVectorThreshold) {
    DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                 << "This loop is not worth vectorizing.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      DEBUG(dbgs() << "\n");
      ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                     "NotBeneficial", L)
                << "vectorization is not beneficial "
                   "and is not explicitly forced");
      return false;
    }
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements(*ORE);
  LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE,
                                &Requirements, &Hints);
  if (!LVL.canVectorize()) {
    DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints);
  CM.collectValuesToIgnore();

  // Check the function attributes to find out if this function should be
  // optimized for size.
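  // Note that an explicitly forced loop is never treated as optimizing for
  // size: the user's request takes precedence over the function attribute
  // and over the cold-entry heuristic below.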
  bool OptForSize =
      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();

  // Compute the weighted frequency of this loop being executed and see if it
  // is less than 20% of the function entry baseline frequency. Note that we
  // always have a canonical loop here because we think we *can* vectorize.
  // FIXME: This is hidden behind a flag due to pervasive problems with
  // exactly what block frequency models.
  if (LoopVectorizeWithBlockFrequency) {
    BlockFrequency LoopEntryFreq = BFI->getBlockFreq(L->getLoopPreheader());
    if (Hints.getForce() != LoopVectorizeHints::FK_Enabled &&
        LoopEntryFreq < ColdEntryFreq)
      OptForSize = true;
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem right -- what if the loop is an integer
  // loop and the vector instructions selected are purely integer vector
  // instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat "
                    "attribute is used.\n");
    ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
                                   "NoImplicitFloat", L)
              << "loop not vectorized due to NoImplicitFloat attribute");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
    ORE->emit(
        createMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
        << "loop not vectorized due to unsafe FP support.");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  // Select the optimal vectorization factor.
  const LoopVectorizationCostModel::VectorizationFactor VF =
      CM.selectVectorizationFactor(OptForSize);

  // Select the interleave count.
  unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);

  // Get the user-specified interleave count.
  unsigned UserIC = Hints.getInterleave();

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (Requirements.doesNotMeet(F, L, Hints)) {
    DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                    "requirements.\n");
    emitMissedWarning(F, L, Hints, ORE);
    return false;
  }

  if (VF.Width == 1) {
    DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }
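  // From here on there are four possible outcomes, handled below: vectorize
  // and interleave, vectorize only, interleave only, or neither, in which
  // case we emit the collected remarks and give up on this loop.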
  if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly
    // disabled.
    DEBUG(dbgs()
          << "LV: Interleaving is beneficial but is explicitly disabled.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if the user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit(OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << VecDiagMsg.second);
    ORE->emit(OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << IntDiagMsg.second);
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit(OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << VecDiagMsg.second);
  } else if (VectorizeLoop && !InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    ORE->emit(OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                         L->getStartLoc(), L->getHeader())
              << IntDiagMsg.second);
  } else if (VectorizeLoop && InterleaveLoop) {
    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
                 << DebugLocStr << '\n');
    DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  using namespace ore;
  if (!VectorizeLoop) {
    assert(IC > 1 && "interleave count should not be 1 or 0");
    // If we decided that vectorizing the loop is not beneficial (VF.Width is
    // 1), then interleave it instead.
    InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                               &CM);
    Unroller.vectorize();

    ORE->emit(OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                 L->getHeader())
              << "interleaved loop (interleave count: "
              << NV("InterleaveCount", IC) << ")");
  } else {
    // If we decided that it is *legal* to vectorize the loop, then do it.
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                           &LVL, &CM);
    LB.vectorize();
    ++LoopsVectorized;

    // Add metadata to disable runtime unrolling of the scalar loop when
    // there are no runtime checks about strides and memory. A scalar loop
    // that is rarely used is not worth unrolling.
    if (!LB.areSafetyChecksAdded())
      AddRuntimeUnrollDisableMetaData(L);

    // Report the vectorization decision.
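    // The emitted remark reads, for example:
    //   "vectorized loop (vectorization width: 4, interleave count: 2)"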
    ORE->emit(OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                 L->getHeader())
              << "vectorized loop (vectorization width: "
              << NV("VectorizationFactor", VF.Width)
              << ", interleave count: " << NV("InterleaveCount", IC) << ")");
  }

  // Mark the loop as already vectorized to avoid vectorizing it again.
  Hints.setAlreadyVectorized();

  DEBUG(verifyFunction(*L->getHeader()->getParent()));
  return true;
}

bool LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;

  // Compute some weights outside of the loop over the loops. Compute this
  // using a BranchProbability to reuse its scaling math.
  const BranchProbability ColdProb(1, 5); // 20%
  ColdEntryFreq = BlockFrequency(BFI->getEntryFreq()) * ColdProb;

  // Don't attempt vectorization if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
    return false;

  bool Changed = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);

  // Build up a worklist of inner loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    addAcyclicInnerLoop(*L, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops, processing each loop nest in the
  // function.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= processLoop(L);
  }

  return Changed;
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  bool Changed =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<LoopAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  return PA;
}